[llvm] 04fff54 - [AMDGPU] Move call clobbered return address registers s[30:31] to callee saved range

Venkata Ramanaiah Nalamothu via llvm-commits llvm-commits at lists.llvm.org
Tue Mar 8 22:48:14 PST 2022


Author: Venkata Ramanaiah Nalamothu
Date: 2022-03-09T12:18:02+05:30
New Revision: 04fff547e281a79c4aca6d0e8aa3a614e46625ee

URL: https://github.com/llvm/llvm-project/commit/04fff547e281a79c4aca6d0e8aa3a614e46625ee
DIFF: https://github.com/llvm/llvm-project/commit/04fff547e281a79c4aca6d0e8aa3a614e46625ee.diff

LOG: [AMDGPU] Move call clobbered return address registers s[30:31] to callee saved range

Currently the return address ABI registers s[30:31], which fall in the call
clobbered register range, are added as live-ins at function entry to preserve
their value across calls, so that they get saved and restored around the
calls.
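
A condensed sketch of the old scheme, taken from the
AMDGPUCallLowering::lowerFormalArguments hunk removed below: the pair is
registered as a live-in and copied into a virtual register so the allocator
preserves it across calls:

  // Previous approach (removed by this patch): treat the return address
  // pair as a live-in at function entry so its value survives calls.
  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn =
        MF.addLiveIn(ReturnAddrReg, &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }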

But the DWARF unwind information (CFI) needs to track where the return address
resides in a frame, and the above approach makes the return address hard to
track when the CFI is emitted during frame lowering, since that would require
understanding the control flow.

This patch moves the return address ABI registers s[30:31] into the callee
saved register range and stops adding them as live-ins, so that the CFI
machinery knows where the return address resides when the CSR save/restore
happens during frame lowering.
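
Concretely, frame lowering now marks both 32-bit halves of the return address
pair as callee saved whenever a non-entry function makes calls or otherwise
clobbers the pair (condensed from the SIFrameLowering.cpp hunk below):

  // Mark s30 and s31 as saved so the CSR spill/restore (and the CFI
  // emitted around it) covers the return address.
  Register RetAddrReg = TRI->getReturnAddressReg(MF);
  if (!MFI->isEntryFunction() &&
      (FrameInfo.hasCalls() || MRI.isPhysRegModified(RetAddrReg))) {
    SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub0)); // s30
    SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub1)); // s31
  }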

Doing the above raises an issue: the return instruction now uses the undefined
register `sgpr30_sgpr31`. This is resolved by hiding the return address
register use behind the `SI_RETURN` pseudo instruction, which takes no input
operands, until `SI_RETURN` is lowered to `S_SETPC_B64_return` in
`expandPostRAPseudo()`.
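
The post-RA expansion then rebuilds the real return (condensed from the
SIInstrInfo::expandPostRAPseudo hunk below); the undef flag on the register
operand is what satisfies the MachineVerifier, since the CSR restore has
already put the correct value back in s[30:31]:

  case AMDGPU::SI_RETURN: {
    // Lower the pseudo to the real return, reading the return address pair
    // restored by the CSR machinery just before this point.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return))
            .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef);
    MIB.copyImplicitOps(MI);
    MI.eraseFromParent();
    break;
  }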

As an added benefit, this patch simplifies overall return instruction handling.

Note: The AMDGPU CFI changes exist only in the downstream code; another
version of this patch will be posted for review there.

Reviewed By: arsenm, ronlieb

Differential Revision: https://reviews.llvm.org/D114652

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
    llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
    llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
    llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
    llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/lib/Target/AMDGPU/SIInstructions.td
    llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
    llvm/lib/Target/AMDGPU/SIRegisterInfo.td
    llvm/lib/Target/AMDGPU/SOPInstructions.td
    llvm/test/CodeGen/AMDGPU/GlobalISel/assert-align.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-post-legalize.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-pre-legalize.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-imm-chain-illegal-types.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/dropped_debug_info_assert.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/dummy-target.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f32.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f64.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/fpow.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/global-value.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-atomicrmw.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constantexpr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constrained-fp.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fixed-function-abi-vgpr-args.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-memory-intrinsics.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-metadata.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-ptrmask.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sat.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-tail-call.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx7.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx8-plus.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ds.fmax.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/load-unaligned.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-smed3.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-umed3.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
    llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
    llvm/test/CodeGen/AMDGPU/addrspacecast-known-non-null.ll
    llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
    llvm/test/CodeGen/AMDGPU/amdpal-callable.ll
    llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
    llvm/test/CodeGen/AMDGPU/call-graph-register-usage.ll
    llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
    llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
    llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll
    llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
    llvm/test/CodeGen/AMDGPU/dagcombine-lshr-and-cmp.ll
    llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
    llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll
    llvm/test/CodeGen/AMDGPU/fix-frame-ptr-reg-copy-livein.ll
    llvm/test/CodeGen/AMDGPU/fpow.ll
    llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll
    llvm/test/CodeGen/AMDGPU/fshr.ll
    llvm/test/CodeGen/AMDGPU/gfx-call-non-gfx-func.ll
    llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
    llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll
    llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
    llvm/test/CodeGen/AMDGPU/indirect-call.ll
    llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
    llvm/test/CodeGen/AMDGPU/ipra-return-address-save-restore.ll
    llvm/test/CodeGen/AMDGPU/ipra.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll
    llvm/test/CodeGen/AMDGPU/llvm.powi.ll
    llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
    llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
    llvm/test/CodeGen/AMDGPU/nested-calls.ll
    llvm/test/CodeGen/AMDGPU/no-remat-indirect-mov.mir
    llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
    llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir
    llvm/test/CodeGen/AMDGPU/save-fp.ll
    llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
    llvm/test/CodeGen/AMDGPU/sibling-call.ll
    llvm/test/CodeGen/AMDGPU/splitkit-copy-bundle.mir
    llvm/test/CodeGen/AMDGPU/stack-realign.ll
    llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll
    llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll
    llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
    llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
    llvm/test/CodeGen/AMDGPU/udiv.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll
    llvm/test/CodeGen/AMDGPU/urem-seteq-illegal-types.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll
    llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll
    llvm/test/CodeGen/AMDGPU/wave32.ll
    llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
    llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
    llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
    llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
index cd084fd5440a5..96166e62f7c5b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -349,7 +349,6 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
                                      FunctionLoweringInfo &FLI) const {
 
   MachineFunction &MF = B.getMF();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
   SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
   MFI->setIfReturnsVoid(!Val);
 
@@ -365,40 +364,15 @@ bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B, const Value *Val,
     return true;
   }
 
-  auto const &ST = MF.getSubtarget<GCNSubtarget>();
-
-  unsigned ReturnOpc = 0;
-  if (IsShader)
-    ReturnOpc = AMDGPU::SI_RETURN_TO_EPILOG;
-  else if (CC == CallingConv::AMDGPU_Gfx)
-    ReturnOpc = AMDGPU::S_SETPC_B64_return_gfx;
-  else
-    ReturnOpc = AMDGPU::S_SETPC_B64_return;
-
+  unsigned ReturnOpc =
+      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::SI_RETURN;
   auto Ret = B.buildInstrNoInsert(ReturnOpc);
-  Register ReturnAddrVReg;
-  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
-    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
-    Ret.addUse(ReturnAddrVReg);
-  } else if (ReturnOpc == AMDGPU::S_SETPC_B64_return_gfx) {
-    ReturnAddrVReg =
-        MRI.createVirtualRegister(&AMDGPU::Gfx_CCR_SGPR_64RegClass);
-    Ret.addUse(ReturnAddrVReg);
-  }
 
   if (!FLI.CanLowerReturn)
     insertSRetStores(B, Val->getType(), VRegs, FLI.DemoteRegister);
   else if (!lowerReturnVal(B, Val, VRegs, Ret))
     return false;
 
-  if (ReturnOpc == AMDGPU::S_SETPC_B64_return ||
-      ReturnOpc == AMDGPU::S_SETPC_B64_return_gfx) {
-    const SIRegisterInfo *TRI = ST.getRegisterInfo();
-    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
-                                         &AMDGPU::SGPR_64RegClass);
-    B.buildCopy(ReturnAddrVReg, LiveInReturn);
-  }
-
   // TODO: Handle CalleeSavedRegsViaCopy.
 
   B.insertInstr(Ret);
@@ -613,14 +587,6 @@ bool AMDGPUCallLowering::lowerFormalArguments(
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());
 
-  if (!IsEntryFunc) {
-    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
-    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
-                                         &AMDGPU::SGPR_64RegClass);
-    MBB.addLiveIn(ReturnAddrReg);
-    B.buildCopy(LiveInReturn, ReturnAddrReg);
-  }
-
   if (Info->hasImplicitBufferPtr()) {
     Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
     MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td b/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td
index 1682d43ae671a..dec227777d8f5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCallingConv.td
@@ -152,6 +152,10 @@ def CSR_AMDGPU_AGPRs_32_255 : CalleeSavedRegs<
   (sequence "AGPR%u", 32, 255)
 >;
 
+def CSR_AMDGPU_SGPRs_30_31 : CalleeSavedRegs<
+  (sequence "SGPR%u", 30, 31)
+>;
+
 def CSR_AMDGPU_SGPRs_32_105 : CalleeSavedRegs<
   (sequence "SGPR%u", 32, 105)
 >;
@@ -182,7 +186,7 @@ def CSR_AMDGPU_AllAllocatableSRegs : CalleeSavedRegs<
 >;
 
 def CSR_AMDGPU_HighRegs : CalleeSavedRegs<
-  (add CSR_AMDGPU_VGPRs, CSR_AMDGPU_SGPRs_32_105)
+  (add CSR_AMDGPU_VGPRs, CSR_AMDGPU_SGPRs_30_31, CSR_AMDGPU_SGPRs_32_105)
 >;
 
 def CSR_AMDGPU_HighRegs_With_AGPRs : CalleeSavedRegs<
@@ -190,7 +194,7 @@ def CSR_AMDGPU_HighRegs_With_AGPRs : CalleeSavedRegs<
 >;
 
 def CSR_AMDGPU_SI_Gfx : CalleeSavedRegs<
-  (add CSR_AMDGPU_VGPRs, CSR_AMDGPU_SI_Gfx_SGPRs_4_29, CSR_AMDGPU_SI_Gfx_SGPRs_64_105)
+  (add CSR_AMDGPU_VGPRs, CSR_AMDGPU_SI_Gfx_SGPRs_4_29, CSR_AMDGPU_SGPRs_30_31, CSR_AMDGPU_SI_Gfx_SGPRs_64_105)
 >;
 
 def CSR_AMDGPU_SI_Gfx_With_AGPRs : CalleeSavedRegs<

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 0fbdb0d33b74d..94a9e882e310c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4406,7 +4406,6 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
   NODE_NAME_CASE(TC_RETURN)
   NODE_NAME_CASE(TRAP)
   NODE_NAME_CASE(RET_FLAG)
-  NODE_NAME_CASE(RET_GFX_FLAG)
   NODE_NAME_CASE(RETURN_TO_EPILOG)
   NODE_NAME_CASE(ENDPGM)
   NODE_NAME_CASE(DWORDADDR)

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index cfd91426270fe..10eecb68fa1de 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -367,9 +367,6 @@ enum NodeType : unsigned {
   // Return with values from a non-entry function.
   RET_FLAG,
 
-  // Return with values from a non-entry function (AMDGPU_Gfx CC).
-  RET_GFX_FLAG,
-
   DWORDADDR,
   FRACT,
 

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
index 391dc84285392..23b8fcf75f168 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstrInfo.td
@@ -355,11 +355,7 @@ def AMDGPUendpgm : SDNode<"AMDGPUISD::ENDPGM", SDTNone,
 def AMDGPUreturn_to_epilog : SDNode<"AMDGPUISD::RETURN_TO_EPILOG", SDTNone,
     [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
 
-def AMDGPUret_flag : SDNode<"AMDGPUISD::RET_FLAG", SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
-  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]
->;
-
-def AMDGPUret_gfx_flag : SDNode<"AMDGPUISD::RET_GFX_FLAG", SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
+def AMDGPUret_flag : SDNode<"AMDGPUISD::RET_FLAG", SDTNone,
   [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]
 >;
 

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
index cf86f80b9684a..15db3e8426b65 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUMCInstLower.cpp
@@ -120,8 +120,7 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
   // FIXME: Should be able to handle this with emitPseudoExpansionLowering. We
   // need to select it to the subtarget specific version, and there's no way to
   // do that with a single pseudo source operation.
-  if (Opcode == AMDGPU::S_SETPC_B64_return ||
-      Opcode == AMDGPU::S_SETPC_B64_return_gfx)
+  if (Opcode == AMDGPU::S_SETPC_B64_return)
     Opcode = AMDGPU::S_SETPC_B64;
   else if (Opcode == AMDGPU::SI_CALL) {
     // SI_CALL is just S_SWAPPC_B64 with an additional operand to track the

diff  --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index d12ff4a142128..97ec3f5288c94 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -1344,6 +1344,20 @@ void SIFrameLowering::determineCalleeSavesSGPR(MachineFunction &MF,
   // FP will be specially managed like SP.
   if (WillHaveFP || hasFP(MF))
     SavedRegs.reset(MFI->getFrameOffsetReg());
+
+  // Return address use with return instruction is hidden through the SI_RETURN
+  // pseudo. Given that and since the IPRA computes actual register usage and
+  // does not use CSR list, the clobbering of return address by function calls
+  // (D117243) or otherwise (D120922) is ignored/not seen by the IPRA's register
+  // usage collection. This will ensure save/restore of return address happens
+  // in those scenarios.
+  const MachineRegisterInfo &MRI = MF.getRegInfo();
+  Register RetAddrReg = TRI->getReturnAddressReg(MF);
+  if (!MFI->isEntryFunction() &&
+      (FrameInfo.hasCalls() || MRI.isPhysRegModified(RetAddrReg))) {
+    SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub0));
+    SavedRegs.set(TRI->getSubReg(RetAddrReg, AMDGPU::sub1));
+  }
 }
 
 bool SIFrameLowering::assignCalleeSavedSpillSlots(

diff  --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 8a33ec4a2fc8f..66bc7b7c9b0cb 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -2666,24 +2666,6 @@ SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
   SmallVector<SDValue, 48> RetOps;
   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
 
-  // Add return address for callable functions.
-  if (!Info->isEntryFunction()) {
-    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
-    SDValue ReturnAddrReg = CreateLiveInRegister(
-      DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
-
-    SDValue ReturnAddrVirtualReg =
-        DAG.getRegister(MF.getRegInfo().createVirtualRegister(
-                            CallConv != CallingConv::AMDGPU_Gfx
-                                ? &AMDGPU::CCR_SGPR_64RegClass
-                                : &AMDGPU::Gfx_CCR_SGPR_64RegClass),
-                        MVT::i64);
-    Chain =
-        DAG.getCopyToReg(Chain, DL, ReturnAddrVirtualReg, ReturnAddrReg, Flag);
-    Flag = Chain.getValue(1);
-    RetOps.push_back(ReturnAddrVirtualReg);
-  }
-
   // Copy the result values into the output registers.
   for (unsigned I = 0, RealRVLocIdx = 0, E = RVLocs.size(); I != E;
        ++I, ++RealRVLocIdx) {
@@ -2740,15 +2722,8 @@ SITargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
     RetOps.push_back(Flag);
 
   unsigned Opc = AMDGPUISD::ENDPGM;
-  if (!IsWaveEnd) {
-    if (IsShader)
-      Opc = AMDGPUISD::RETURN_TO_EPILOG;
-    else if (CallConv == CallingConv::AMDGPU_Gfx)
-      Opc = AMDGPUISD::RET_GFX_FLAG;
-    else
-      Opc = AMDGPUISD::RET_FLAG;
-  }
-
+  if (!IsWaveEnd)
+    Opc = IsShader ? AMDGPUISD::RETURN_TO_EPILOG : AMDGPUISD::RET_FLAG;
   return DAG.getNode(Opc, DL, MVT::Other, RetOps);
 }
 
@@ -3330,21 +3305,6 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
   }
 
 
-  SDValue PhysReturnAddrReg;
-  if (IsTailCall) {
-    // Since the return is being combined with the call, we need to pass on the
-    // return address.
-
-    const SIRegisterInfo *TRI = getSubtarget()->getRegisterInfo();
-    SDValue ReturnAddrReg = CreateLiveInRegister(
-      DAG, &AMDGPU::SReg_64RegClass, TRI->getReturnAddressReg(MF), MVT::i64);
-
-    PhysReturnAddrReg = DAG.getRegister(TRI->getReturnAddressReg(MF),
-                                        MVT::i64);
-    Chain = DAG.getCopyToReg(Chain, DL, PhysReturnAddrReg, ReturnAddrReg, InFlag);
-    InFlag = Chain.getValue(1);
-  }
-
   // We don't usually want to end the call-sequence here because we would tidy
   // the frame up *after* the call, however in the ABI-changing tail-call case
   // we've carefully laid out the parameters so that when sp is reset they'll be
@@ -3374,8 +3334,6 @@ SDValue SITargetLowering::LowerCall(CallLoweringInfo &CLI,
     // this information must travel along with the operation for eventual
     // consumption by emitEpilogue.
     Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
-
-    Ops.push_back(PhysReturnAddrReg);
   }
 
   // Add argument registers to the end of the list so that they are known live

diff  --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 89820e3984247..0a6f8551b4318 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -955,8 +955,8 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(
   // NOTE: this could be improved with knowledge of all call sites or
   //   with knowledge of the called routines.
   if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
+      MI.getOpcode() == AMDGPU::SI_RETURN ||
       MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
-      MI.getOpcode() == AMDGPU::S_SETPC_B64_return_gfx ||
       (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
     Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
   }

diff  --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 87e0a93efea71..542095ccea672 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2132,6 +2132,23 @@ bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
     MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
     break;
   }
+  case AMDGPU::SI_RETURN: {
+    const MachineFunction *MF = MBB.getParent();
+    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
+    const SIRegisterInfo *TRI = ST.getRegisterInfo();
+    // Hiding the return address use with SI_RETURN may lead to extra kills in
+    // the function and missing live-ins. We are fine in practice because callee
+    // saved register handling ensures the register value is restored before
+    // RET, but we need the undef flag here to appease the MachineVerifier
+    // liveness checks.
+    MachineInstrBuilder MIB =
+        BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return))
+            .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef);
+
+    MIB.copyImplicitOps(MI);
+    MI.eraseFromParent();
+    break;
+  }
   }
   return true;
 }

diff  --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 81bfcb72d12a9..26acaa738c863 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -487,7 +487,7 @@ def SI_RETURN_TO_EPILOG : SPseudoInstSI <
 
 // Return for returning function calls.
 def SI_RETURN : SPseudoInstSI <
-  (outs), (ins), [],
+  (outs), (ins), [(AMDGPUret_flag)],
   "; return"> {
   let isTerminator = 1;
   let isBarrier = 1;

diff  --git a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
index ddc19440c2558..fcc3a2c750301 100644
--- a/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
+++ b/llvm/lib/Target/AMDGPU/SILowerSGPRSpills.cpp
@@ -79,6 +79,8 @@ static void insertCSRSaves(MachineBasicBlock &SaveBlock,
   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
   const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const SIRegisterInfo *RI = ST.getRegisterInfo();
 
   MachineBasicBlock::iterator I = SaveBlock.begin();
   if (!TFI->spillCalleeSavedRegisters(SaveBlock, I, CSI, TRI)) {
@@ -89,8 +91,8 @@ static void insertCSRSaves(MachineBasicBlock &SaveBlock,
       MCRegister Reg = CS.getReg();
 
       MachineInstrSpan MIS(I, &SaveBlock);
-      const TargetRegisterClass *RC =
-        TRI->getMinimalPhysRegClass(Reg, MVT::i32);
+      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(
+          Reg, Reg == RI->getReturnAddressReg(MF) ? MVT::i64 : MVT::i32);
 
       // If this value was already livein, we probably have a direct use of the
       // incoming register value, so don't kill at the spill point. This happens
@@ -119,7 +121,8 @@ static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
   const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
   const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-
+  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+  const SIRegisterInfo *RI = ST.getRegisterInfo();
   // Restore all registers immediately before the return and any
   // terminators that precede it.
   MachineBasicBlock::iterator I = RestoreBlock.getFirstTerminator();
@@ -128,8 +131,8 @@ static void insertCSRRestores(MachineBasicBlock &RestoreBlock,
   if (!TFI->restoreCalleeSavedRegisters(RestoreBlock, I, CSI, TRI)) {
     for (const CalleeSavedInfo &CI : reverse(CSI)) {
       Register Reg = CI.getReg();
-      const TargetRegisterClass *RC =
-        TRI->getMinimalPhysRegClass(Reg, MVT::i32);
+      const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(
+          Reg, Reg == RI->getReturnAddressReg(MF) ? MVT::i64 : MVT::i32);
 
       TII.loadRegFromStackSlot(RestoreBlock, I, Reg, CI.getFrameIdx(), RC, TRI);
       assert(I != RestoreBlock.begin() &&

diff  --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index 4d6557c23b720..3efd1d2a6c6bb 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -701,23 +701,6 @@ def SGPR_64 : SIRegisterClass<"AMDGPU", [v2i32, i64, v2f32, f64, v4i16, v4f16],
   let HasSGPR = 1;
 }
 
-// CCR (call clobbered registers) SGPR 64-bit registers
-def CCR_SGPR_64 : SIRegisterClass<"AMDGPU", SGPR_64.RegTypes, 32,
-                                (add (trunc SGPR_64, 16))> {
-  let CopyCost = SGPR_64.CopyCost;
-  let AllocationPriority = SGPR_64.AllocationPriority;
-  let HasSGPR = 1;
-}
-
-// Call clobbered 64-bit SGPRs for AMDGPU_Gfx CC
-def Gfx_CCR_SGPR_64 : SIRegisterClass<"AMDGPU", SGPR_64.RegTypes, 32,
-                                (add (trunc (shl SGPR_64, 15), 1), // s[30:31]
-                                     (trunc (shl SGPR_64, 18), 14))> { // s[36:37]-s[s62:63]
-  let CopyCost = SGPR_64.CopyCost;
-  let AllocationPriority = SGPR_64.AllocationPriority;
-  let HasSGPR = 1;
-}
-
 def TTMP_64 : SIRegisterClass<"AMDGPU", [v2i32, i64, f64, v4i16, v4f16], 32,
                             (add TTMP_64Regs)> {
   let isAllocatable = 0;

diff  --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index 73106af35c94f..86d1feb183f44 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -152,8 +152,8 @@ class SOP1_64_0 <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
 }
 
 // 64-bit input, no output
-class SOP1_1 <string opName, RegisterClass rc = SReg_64, list<dag> pattern=[]> : SOP1_Pseudo <
-  opName, (outs), (ins rc:$src0), "$src0", pattern> {
+class SOP1_1 <string opName, list<dag> pattern=[]> : SOP1_Pseudo <
+  opName, (outs), (ins SReg_64:$src0), "$src0", pattern> {
   let has_sdst = 0;
 }
 
@@ -300,8 +300,7 @@ def S_SETPC_B64 : SOP1_1  <"s_setpc_b64">;
 
 let isReturn = 1 in {
 // Define variant marked as return rather than branch.
-def S_SETPC_B64_return : SOP1_1<"", CCR_SGPR_64, [(AMDGPUret_flag i64:$src0)]>;
-def S_SETPC_B64_return_gfx : SOP1_1<"", Gfx_CCR_SGPR_64, [(AMDGPUret_gfx_flag i64:$src0)]>;
+def S_SETPC_B64_return : SOP1_1<"">;
 }
 } // End isTerminator = 1, isBarrier = 1
 

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/assert-align.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/assert-align.ll
index 225dec6d39d72..031925033ec3c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/assert-align.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/assert-align.ll
@@ -22,17 +22,17 @@ define i32 addrspace(1)* @call_assert_align() {
 ; CHECK-NEXT:    s_addc_u32 s17, s17, ext@rel32@hi+12
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:    v_mov_b32_e32 v2, 0
-; CHECK-NEXT:    v_readlane_b32 s4, v40, 0
 ; CHECK-NEXT:    global_store_dword v[0:1], v2, off
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    v_readlane_b32 s5, v40, 1
+; CHECK-NEXT:    v_readlane_b32 s31, v40, 1
+; CHECK-NEXT:    v_readlane_b32 s30, v40, 0
 ; CHECK-NEXT:    s_addk_i32 s32, 0xfc00
 ; CHECK-NEXT:    v_readlane_b32 s33, v40, 2
-; CHECK-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_mov_b64 exec, s[6:7]
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    s_setpc_b64 s[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %call = call align 4 i32 addrspace(1)* @ext(i8 addrspace(1)* null)
   store volatile i32 0, i32 addrspace(1)* %call

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
index 7521a3c2c6139..1aa25c4b9c372 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
@@ -256,15 +256,15 @@ define void @func_caller_stack() {
 ; MUBUF-NEXT:    s_addc_u32 s5, s5, external_void_func_v16i32_v16i32_v4i32@rel32@hi+12
 ; MUBUF-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:16
 ; MUBUF-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; MUBUF-NEXT:    v_readlane_b32 s4, v40, 0
-; MUBUF-NEXT:    v_readlane_b32 s5, v40, 1
+; MUBUF-NEXT:    v_readlane_b32 s31, v40, 1
+; MUBUF-NEXT:    v_readlane_b32 s30, v40, 0
 ; MUBUF-NEXT:    s_addk_i32 s32, 0xfc00
 ; MUBUF-NEXT:    v_readlane_b32 s33, v40, 2
-; MUBUF-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; MUBUF-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; MUBUF-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; MUBUF-NEXT:    s_mov_b64 exec, s[6:7]
+; MUBUF-NEXT:    s_mov_b64 exec, s[4:5]
 ; MUBUF-NEXT:    s_waitcnt vmcnt(0)
-; MUBUF-NEXT:    s_setpc_b64 s[4:5]
+; MUBUF-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; FLATSCR-LABEL: func_caller_stack:
 ; FLATSCR:       ; %bb.0:
@@ -289,15 +289,15 @@ define void @func_caller_stack() {
 ; FLATSCR-NEXT:    s_addc_u32 s1, s1, external_void_func_v16i32_v16i32_v4i32@rel32@hi+12
 ; FLATSCR-NEXT:    scratch_store_dword off, v0, s32 offset:16
 ; FLATSCR-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; FLATSCR-NEXT:    v_readlane_b32 s0, v40, 0
-; FLATSCR-NEXT:    v_readlane_b32 s1, v40, 1
+; FLATSCR-NEXT:    v_readlane_b32 s31, v40, 1
+; FLATSCR-NEXT:    v_readlane_b32 s30, v40, 0
 ; FLATSCR-NEXT:    s_add_i32 s32, s32, -16
 ; FLATSCR-NEXT:    v_readlane_b32 s33, v40, 2
-; FLATSCR-NEXT:    s_or_saveexec_b64 s[2:3], -1
+; FLATSCR-NEXT:    s_or_saveexec_b64 s[0:1], -1
 ; FLATSCR-NEXT:    scratch_load_dword v40, off, s32 ; 4-byte Folded Reload
-; FLATSCR-NEXT:    s_mov_b64 exec, s[2:3]
+; FLATSCR-NEXT:    s_mov_b64 exec, s[0:1]
 ; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
-; FLATSCR-NEXT:    s_setpc_b64 s[0:1]
+; FLATSCR-NEXT:    s_setpc_b64 s[30:31]
   call void @external_void_func_v16i32_v16i32_v4i32(<16 x i32> undef, <16 x i32> undef, <4 x i32> <i32 9, i32 10, i32 11, i32 12>)
   ret void
 }
@@ -373,15 +373,15 @@ define void @func_caller_byval([16 x i32] addrspace(5)* %argptr) {
 ; MUBUF-NEXT:    s_waitcnt vmcnt(1)
 ; MUBUF-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:60
 ; MUBUF-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; MUBUF-NEXT:    v_readlane_b32 s4, v40, 0
-; MUBUF-NEXT:    v_readlane_b32 s5, v40, 1
+; MUBUF-NEXT:    v_readlane_b32 s31, v40, 1
+; MUBUF-NEXT:    v_readlane_b32 s30, v40, 0
 ; MUBUF-NEXT:    s_addk_i32 s32, 0xfc00
 ; MUBUF-NEXT:    v_readlane_b32 s33, v40, 2
-; MUBUF-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; MUBUF-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; MUBUF-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; MUBUF-NEXT:    s_mov_b64 exec, s[6:7]
+; MUBUF-NEXT:    s_mov_b64 exec, s[4:5]
 ; MUBUF-NEXT:    s_waitcnt vmcnt(0)
-; MUBUF-NEXT:    s_setpc_b64 s[4:5]
+; MUBUF-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; FLATSCR-LABEL: func_caller_byval:
 ; FLATSCR:       ; %bb.0:
@@ -422,15 +422,15 @@ define void @func_caller_byval([16 x i32] addrspace(5)* %argptr) {
 ; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
 ; FLATSCR-NEXT:    scratch_store_dwordx2 off, v[0:1], s32 offset:56
 ; FLATSCR-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; FLATSCR-NEXT:    v_readlane_b32 s0, v40, 0
-; FLATSCR-NEXT:    v_readlane_b32 s1, v40, 1
+; FLATSCR-NEXT:    v_readlane_b32 s31, v40, 1
+; FLATSCR-NEXT:    v_readlane_b32 s30, v40, 0
 ; FLATSCR-NEXT:    s_add_i32 s32, s32, -16
 ; FLATSCR-NEXT:    v_readlane_b32 s33, v40, 2
-; FLATSCR-NEXT:    s_or_saveexec_b64 s[2:3], -1
+; FLATSCR-NEXT:    s_or_saveexec_b64 s[0:1], -1
 ; FLATSCR-NEXT:    scratch_load_dword v40, off, s32 ; 4-byte Folded Reload
-; FLATSCR-NEXT:    s_mov_b64 exec, s[2:3]
+; FLATSCR-NEXT:    s_mov_b64 exec, s[0:1]
 ; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
-; FLATSCR-NEXT:    s_setpc_b64 s[0:1]
+; FLATSCR-NEXT:    s_setpc_b64 s[30:31]
   %cast = bitcast [16 x i32] addrspace(5)* %argptr to i8 addrspace(5)*
   call void @external_void_func_byval([16 x i32] addrspace(5)* byval([16 x i32]) %argptr)
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-post-legalize.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-post-legalize.mir
index 52665fa9c4463..b40409496cee3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-post-legalize.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-post-legalize.mir
@@ -12,186 +12,150 @@
 name:            test_f32_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_f32_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[COPY2]]
     ; GFX9-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_f32_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_f32_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[COPY2]]
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_f32_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_f32_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[COPY2]]
     ; GFX10-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_f32_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_f32_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[FMUL]], [[COPY2]]
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_f32_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = COPY $vgpr2
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %4:_(s32) = G_FMUL %0, %1
     %5:_(s32) = G_FADD %4, %2
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
 name:            test_f32_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_f32_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY2]], [[FMUL]]
     ; GFX9-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_f32_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_f32_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY2]], [[FMUL]]
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_f32_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_f32_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY2]], [[FMUL]]
     ; GFX10-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_f32_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_f32_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY2]], [[FMUL]]
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_f32_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = COPY $vgpr2
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %4:_(s32) = G_FMUL %0, %1
     %5:_(s32) = G_FADD %2, %4
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -404,7 +368,7 @@ body: |
 name:            test_half_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_half_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -413,13 +377,11 @@ body:             |
     ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_half_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -427,12 +389,10 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-CONTRACT-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-CONTRACT-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_half_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -440,13 +400,11 @@ body:             |
     ; GFX9-DENORM-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-DENORM-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX9-DENORM-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_half_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -454,12 +412,10 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_half_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -467,13 +423,11 @@ body:             |
     ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_half_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -481,12 +435,10 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-CONTRACT-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-CONTRACT-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_half_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -494,13 +446,11 @@ body:             |
     ; GFX10-DENORM-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-DENORM-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX10-DENORM-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_half_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -508,32 +458,28 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %4:_(s32) = COPY $vgpr0
     %0:_(s16) = G_TRUNC %4(s32)
     %5:_(s32) = COPY $vgpr1
     %1:_(s16) = G_TRUNC %5(s32)
     %6:_(s32) = COPY $vgpr2
     %2:_(s16) = G_TRUNC %6(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %7:_(s16) = G_FMUL %0, %1
     %8:_(s16) = G_FADD %7, %2
     %10:_(s32) = G_ANYEXT %8(s16)
     $vgpr0 = COPY %10(s32)
-    %9:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %9, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
 name:            test_half_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_half_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -542,13 +488,11 @@ body:             |
     ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX9-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_half_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -556,12 +500,10 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-CONTRACT-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-CONTRACT-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_half_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -569,13 +511,11 @@ body:             |
     ; GFX9-DENORM-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-DENORM-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX9-DENORM-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_half_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -583,12 +523,10 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_half_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -596,13 +534,11 @@ body:             |
     ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX10-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_half_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -610,12 +546,10 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-CONTRACT-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-CONTRACT-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-CONTRACT-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_half_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -623,13 +557,11 @@ body:             |
     ; GFX10-DENORM-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-DENORM-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-DENORM-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s16) = G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s16) = G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX10-DENORM-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_half_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -637,32 +569,28 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-UNSAFE-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-UNSAFE-NEXT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %4:_(s32) = COPY $vgpr0
     %0:_(s16) = G_TRUNC %4(s32)
     %5:_(s32) = COPY $vgpr1
     %1:_(s16) = G_TRUNC %5(s32)
     %6:_(s32) = COPY $vgpr2
     %2:_(s16) = G_TRUNC %6(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %7:_(s16) = G_FMUL %0, %1
     %8:_(s16) = G_FADD %2, %7
     %10:_(s32) = G_ANYEXT %8(s16)
     $vgpr0 = COPY %10(s32)
-    %9:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %9, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
 name:            test_double_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_double_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -674,14 +602,12 @@ body:             |
     ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[MV2]]
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_double_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -692,13 +618,11 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-CONTRACT-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-CONTRACT-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_double_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -709,14 +633,12 @@ body:             |
     ; GFX9-DENORM-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-DENORM-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-DENORM-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[MV2]]
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_double_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -727,13 +649,11 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-UNSAFE-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-UNSAFE-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_double_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -744,14 +664,12 @@ body:             |
     ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[MV2]]
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_double_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -762,13 +680,11 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-CONTRACT-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-CONTRACT-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_double_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -779,14 +695,12 @@ body:             |
     ; GFX10-DENORM-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-DENORM-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-DENORM-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[FMUL]], [[MV2]]
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_double_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -797,13 +711,11 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-UNSAFE-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-UNSAFE-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %0:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
@@ -813,21 +725,19 @@ body:             |
     %8:_(s32) = COPY $vgpr4
     %9:_(s32) = COPY $vgpr5
     %2:_(s64) = G_MERGE_VALUES %8(s32), %9(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %10:_(s64) = G_FMUL %0, %1
     %11:_(s64) = G_FADD %10, %2
     %13:_(s32), %14:_(s32) = G_UNMERGE_VALUES %11(s64)
     $vgpr0 = COPY %13(s32)
     $vgpr1 = COPY %14(s32)
-    %12:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_double_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_double_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -839,14 +749,12 @@ body:             |
     ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[MV2]], [[FMUL]]
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_double_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -857,13 +765,11 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-CONTRACT-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-CONTRACT-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_double_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -874,14 +780,12 @@ body:             |
     ; GFX9-DENORM-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-DENORM-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-DENORM-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[MV2]], [[FMUL]]
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_double_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -892,13 +796,11 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-UNSAFE-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-UNSAFE-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_double_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -909,14 +811,12 @@ body:             |
     ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[MV2]], [[FMUL]]
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_double_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -927,13 +827,11 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-CONTRACT-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-CONTRACT-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_double_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -944,14 +842,12 @@ body:             |
     ; GFX10-DENORM-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-DENORM-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-DENORM-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[MV]], [[MV1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[MV2]], [[FMUL]]
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_double_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -962,13 +858,11 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-UNSAFE-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-UNSAFE-NEXT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %0:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
@@ -978,21 +872,19 @@ body:             |
     %8:_(s32) = COPY $vgpr4
     %9:_(s32) = COPY $vgpr5
     %2:_(s64) = G_MERGE_VALUES %8(s32), %9(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %10:_(s64) = G_FMUL %0, %1
     %11:_(s64) = G_FADD %2, %10
     %13:_(s32), %14:_(s32) = G_UNMERGE_VALUES %11(s64)
     $vgpr0 = COPY %13(s32)
     $vgpr1 = COPY %14(s32)
-    %12:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_4xfloat_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11
 
     ; GFX9-LABEL: name: test_4xfloat_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -1010,7 +902,6 @@ body:             |
     ; GFX9-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1018,8 +909,7 @@ body:             |
     ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX9-CONTRACT-LABEL: name: test_4xfloat_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1036,7 +926,6 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9-CONTRACT-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1044,8 +933,7 @@ body:             |
     ; GFX9-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX9-DENORM-LABEL: name: test_4xfloat_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1062,7 +950,6 @@ body:             |
     ; GFX9-DENORM-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9-DENORM-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1070,8 +957,7 @@ body:             |
     ; GFX9-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-DENORM-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9-DENORM-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX9-UNSAFE-LABEL: name: test_4xfloat_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1088,7 +974,6 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9-UNSAFE-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1096,8 +981,7 @@ body:             |
     ; GFX9-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-LABEL: name: test_4xfloat_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1114,7 +998,6 @@ body:             |
     ; GFX10-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1122,8 +1005,7 @@ body:             |
     ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-CONTRACT-LABEL: name: test_4xfloat_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1140,7 +1022,6 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10-CONTRACT-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1148,8 +1029,7 @@ body:             |
     ; GFX10-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-DENORM-LABEL: name: test_4xfloat_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1166,7 +1046,6 @@ body:             |
     ; GFX10-DENORM-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10-DENORM-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1174,8 +1053,7 @@ body:             |
     ; GFX10-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-DENORM-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10-DENORM-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-UNSAFE-LABEL: name: test_4xfloat_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1192,7 +1070,6 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10-UNSAFE-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<4 x s32>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -1200,8 +1077,7 @@ body:             |
     ; GFX10-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -1217,7 +1093,6 @@ body:             |
     %14:_(s32) = COPY $vgpr10
     %15:_(s32) = COPY $vgpr11
     %2:_(<4 x s32>) = G_BUILD_VECTOR %12(s32), %13(s32), %14(s32), %15(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %16:_(<4 x s32>) = G_FMUL %0, %1
     %17:_(<4 x s32>) = G_FADD %16, %2
     %19:_(s32), %20:_(s32), %21:_(s32), %22:_(s32) = G_UNMERGE_VALUES %17(<4 x s32>)
@@ -1225,15 +1100,14 @@ body:             |
     $vgpr1 = COPY %20(s32)
     $vgpr2 = COPY %21(s32)
     $vgpr3 = COPY %22(s32)
-    %18:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %18, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
 ...
 
 ---
 name:            test_3xfloat_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
 
     ; GFX9-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -1248,15 +1122,13 @@ body:             |
     ; GFX9-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX9-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX9-CONTRACT-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1270,15 +1142,13 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9-CONTRACT-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX9-DENORM-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1292,15 +1162,13 @@ body:             |
     ; GFX9-DENORM-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9-DENORM-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-DENORM-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX9-UNSAFE-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1314,15 +1182,13 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9-UNSAFE-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1336,15 +1202,13 @@ body:             |
     ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX10-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-CONTRACT-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1358,15 +1222,13 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10-CONTRACT-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-DENORM-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1380,15 +1242,13 @@ body:             |
     ; GFX10-DENORM-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10-DENORM-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-DENORM-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-DENORM-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-UNSAFE-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1402,15 +1262,13 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10-UNSAFE-NEXT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s32>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<3 x s32>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -1423,22 +1281,20 @@ body:             |
     %11:_(s32) = COPY $vgpr7
     %12:_(s32) = COPY $vgpr8
     %2:_(<3 x s32>) = G_BUILD_VECTOR %10(s32), %11(s32), %12(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %13:_(<3 x s32>) = G_FMUL %0, %1
     %14:_(<3 x s32>) = G_FADD %2, %13
     %16:_(s32), %17:_(s32), %18:_(s32) = G_UNMERGE_VALUES %14(<3 x s32>)
     $vgpr0 = COPY %16(s32)
     $vgpr1 = COPY %17(s32)
     $vgpr2 = COPY %18(s32)
-    %15:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %15, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
 ...
 
 ---
 name:            test_4xhalf_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_4xhalf_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
@@ -1450,14 +1306,12 @@ body:             |
     ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX9-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_4xhalf_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1468,14 +1322,12 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-CONTRACT-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9-CONTRACT-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX9-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_4xhalf_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1486,14 +1338,12 @@ body:             |
     ; GFX9-DENORM-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-DENORM-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9-DENORM-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-DENORM-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_4xhalf_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1504,14 +1354,12 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-UNSAFE-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9-UNSAFE-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX9-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_4xhalf_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1522,14 +1370,12 @@ body:             |
     ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX10-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_4xhalf_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1540,14 +1386,12 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10-CONTRACT-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10-CONTRACT-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX10-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_4xhalf_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1558,14 +1402,12 @@ body:             |
     ; GFX10-DENORM-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10-DENORM-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10-DENORM-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-DENORM-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_4xhalf_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1576,14 +1418,12 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10-UNSAFE-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10-UNSAFE-NEXT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s16>) = G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX10-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<4 x s16>) = G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(<2 x s16>) = COPY $vgpr0
     %5:_(<2 x s16>) = COPY $vgpr1
     %0:_(<4 x s16>) = G_CONCAT_VECTORS %4(<2 x s16>), %5(<2 x s16>)
@@ -1593,21 +1433,19 @@ body:             |
     %8:_(<2 x s16>) = COPY $vgpr4
     %9:_(<2 x s16>) = COPY $vgpr5
     %2:_(<4 x s16>) = G_CONCAT_VECTORS %8(<2 x s16>), %9(<2 x s16>)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %10:_(<4 x s16>) = G_FMUL %0, %1
     %11:_(<4 x s16>) = G_FADD %10, %2
     %13:_(<2 x s16>), %14:_(<2 x s16>) = G_UNMERGE_VALUES %11(<4 x s16>)
     $vgpr0 = COPY %13(<2 x s16>)
     $vgpr1 = COPY %14(<2 x s16>)
-    %12:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_3xhalf_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
@@ -1629,7 +1467,6 @@ body:             |
     ; GFX9-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX9-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1637,8 +1474,7 @@ body:             |
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1659,7 +1495,6 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9-CONTRACT-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9-CONTRACT-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX9-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX9-CONTRACT-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1667,8 +1502,7 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1689,7 +1523,6 @@ body:             |
     ; GFX9-DENORM-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9-DENORM-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9-DENORM-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX9-DENORM-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1697,8 +1530,7 @@ body:             |
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9-DENORM-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-DENORM-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1719,7 +1551,6 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9-UNSAFE-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX9-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX9-UNSAFE-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1727,8 +1558,7 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1749,7 +1579,6 @@ body:             |
     ; GFX10-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX10-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1757,8 +1586,7 @@ body:             |
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1779,7 +1607,6 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10-CONTRACT-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10-CONTRACT-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10-CONTRACT-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX10-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX10-CONTRACT-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1787,8 +1614,7 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10-CONTRACT-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-CONTRACT-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-CONTRACT-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1809,7 +1635,6 @@ body:             |
     ; GFX10-DENORM-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10-DENORM-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10-DENORM-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10-DENORM-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX10-DENORM-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1817,8 +1642,7 @@ body:             |
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10-DENORM-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-DENORM-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-DENORM-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1839,7 +1663,6 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10-UNSAFE-NEXT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10-UNSAFE-NEXT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10-UNSAFE-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s16>) = G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX10-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<3 x s16>) = G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX10-UNSAFE-NEXT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1847,8 +1670,7 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10-UNSAFE-NEXT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-UNSAFE-NEXT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-UNSAFE-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(<2 x s16>) = COPY $vgpr0
     %5:_(<2 x s16>) = COPY $vgpr1
     %10:_(<2 x s16>) = G_IMPLICIT_DEF
@@ -1862,7 +1684,6 @@ body:             |
     %9:_(<2 x s16>) = COPY $vgpr5
     %15:_(<6 x s16>) = G_CONCAT_VECTORS %8(<2 x s16>), %9(<2 x s16>), %10(<2 x s16>)
     %2:_(<3 x s16>), %16:_(<3 x s16>) = G_UNMERGE_VALUES %15(<6 x s16>)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %17:_(<3 x s16>) = G_FMUL %0, %1
     %18:_(<3 x s16>) = G_FADD %2, %17
     %22:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1870,15 +1691,14 @@ body:             |
     %20:_(<2 x s16>), %21:_(<2 x s16>), %24:_(<2 x s16>) = G_UNMERGE_VALUES %23(<6 x s16>)
     $vgpr0 = COPY %20(<2 x s16>)
     $vgpr1 = COPY %21(<2 x s16>)
-    %19:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %19, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_4xdouble_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23
 
     ; GFX9-LABEL: name: test_4xdouble_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -1920,7 +1740,6 @@ body:             |
     ; GFX9-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -1932,8 +1751,7 @@ body:             |
     ; GFX9-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX9-CONTRACT-LABEL: name: test_4xdouble_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1974,7 +1792,6 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9-CONTRACT-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9-CONTRACT-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -1986,8 +1803,7 @@ body:             |
     ; GFX9-CONTRACT-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX9-DENORM-LABEL: name: test_4xdouble_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2028,7 +1844,6 @@ body:             |
     ; GFX9-DENORM-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9-DENORM-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9-DENORM-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -2040,8 +1855,7 @@ body:             |
     ; GFX9-DENORM-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9-DENORM-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9-DENORM-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX9-UNSAFE-LABEL: name: test_4xdouble_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2082,7 +1896,6 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9-UNSAFE-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9-UNSAFE-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -2094,8 +1907,7 @@ body:             |
     ; GFX9-UNSAFE-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-LABEL: name: test_4xdouble_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2136,7 +1948,6 @@ body:             |
     ; GFX10-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -2148,8 +1959,7 @@ body:             |
     ; GFX10-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-CONTRACT-LABEL: name: test_4xdouble_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2190,7 +2000,6 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10-CONTRACT-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10-CONTRACT-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -2202,8 +2011,7 @@ body:             |
     ; GFX10-CONTRACT-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-DENORM-LABEL: name: test_4xdouble_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2244,7 +2052,6 @@ body:             |
     ; GFX10-DENORM-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10-DENORM-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10-DENORM-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -2256,8 +2063,7 @@ body:             |
     ; GFX10-DENORM-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10-DENORM-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10-DENORM-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-UNSAFE-LABEL: name: test_4xdouble_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2298,7 +2104,6 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10-UNSAFE-NEXT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10-UNSAFE-NEXT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<4 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<4 x s64>) = G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -2310,8 +2115,7 @@ body:             |
     ; GFX10-UNSAFE-NEXT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -2351,7 +2155,6 @@ body:             |
     %38:_(s64) = G_MERGE_VALUES %24(s32), %25(s32)
     %39:_(s64) = G_MERGE_VALUES %26(s32), %27(s32)
     %2:_(<4 x s64>) = G_BUILD_VECTOR %36(s64), %37(s64), %38(s64), %39(s64)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %40:_(<4 x s64>) = G_FMUL %0, %1
     %41:_(<4 x s64>) = G_FADD %40, %2
     %43:_(s32), %44:_(s32), %45:_(s32), %46:_(s32), %47:_(s32), %48:_(s32), %49:_(s32), %50:_(s32) = G_UNMERGE_VALUES %41(<4 x s64>)
@@ -2363,15 +2166,14 @@ body:             |
     $vgpr5 = COPY %48(s32)
     $vgpr6 = COPY %49(s32)
     $vgpr7 = COPY %50(s32)
-    %42:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %42, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
 ...
 
 ---
 name:            test_3xdouble_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17
 
     ; GFX9-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -2404,7 +2206,6 @@ body:             |
     ; GFX9-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2414,8 +2215,7 @@ body:             |
     ; GFX9-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX9-CONTRACT-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2447,7 +2247,6 @@ body:             |
     ; GFX9-CONTRACT-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9-CONTRACT-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9-CONTRACT-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2457,8 +2256,7 @@ body:             |
     ; GFX9-CONTRACT-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9-CONTRACT-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9-CONTRACT-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX9-DENORM-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2490,7 +2288,6 @@ body:             |
     ; GFX9-DENORM-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9-DENORM-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9-DENORM-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2500,8 +2297,7 @@ body:             |
     ; GFX9-DENORM-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9-DENORM-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9-DENORM-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9-DENORM-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9-DENORM-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX9-UNSAFE-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2533,7 +2329,6 @@ body:             |
     ; GFX9-UNSAFE-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9-UNSAFE-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9-UNSAFE-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2543,8 +2338,7 @@ body:             |
     ; GFX9-UNSAFE-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9-UNSAFE-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9-UNSAFE-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2576,7 +2370,6 @@ body:             |
     ; GFX10-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2586,8 +2379,7 @@ body:             |
     ; GFX10-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-CONTRACT-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2619,7 +2411,6 @@ body:             |
     ; GFX10-CONTRACT-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10-CONTRACT-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10-CONTRACT-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10-CONTRACT-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-CONTRACT-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-CONTRACT-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2629,8 +2420,7 @@ body:             |
     ; GFX10-CONTRACT-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10-CONTRACT-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10-CONTRACT-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10-CONTRACT-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-DENORM-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2662,7 +2452,6 @@ body:             |
     ; GFX10-DENORM-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10-DENORM-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10-DENORM-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10-DENORM-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-DENORM-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2672,8 +2461,7 @@ body:             |
     ; GFX10-DENORM-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10-DENORM-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10-DENORM-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10-DENORM-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10-DENORM-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10-DENORM-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-UNSAFE-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2705,7 +2493,6 @@ body:             |
     ; GFX10-UNSAFE-NEXT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10-UNSAFE-NEXT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10-UNSAFE-NEXT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10-UNSAFE-NEXT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE-NEXT: [[FMUL:%[0-9]+]]:_(<3 x s64>) = G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-UNSAFE-NEXT: [[FADD:%[0-9]+]]:_(<3 x s64>) = G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-UNSAFE-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2715,8 +2502,7 @@ body:             |
     ; GFX10-UNSAFE-NEXT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10-UNSAFE-NEXT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10-UNSAFE-NEXT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10-UNSAFE-NEXT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -2747,7 +2533,6 @@ body:             |
     %29:_(s64) = G_MERGE_VALUES %18(s32), %19(s32)
     %30:_(s64) = G_MERGE_VALUES %20(s32), %21(s32)
     %2:_(<3 x s64>) = G_BUILD_VECTOR %28(s64), %29(s64), %30(s64)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %31:_(<3 x s64>) = G_FMUL %0, %1
     %32:_(<3 x s64>) = G_FADD %2, %31
     %34:_(s32), %35:_(s32), %36:_(s32), %37:_(s32), %38:_(s32), %39:_(s32) = G_UNMERGE_VALUES %32(<3 x s64>)
@@ -2757,6 +2542,5 @@ body:             |
     $vgpr3 = COPY %37(s32)
     $vgpr4 = COPY %38(s32)
     $vgpr5 = COPY %39(s32)
-    %33:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %33, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-pre-legalize.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-pre-legalize.mir
index 67d0fd31db6b4..d6ef87759e9d7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-pre-legalize.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fma-add-mul-pre-legalize.mir
@@ -12,193 +12,157 @@
 name:            test_f32_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_f32_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[FMUL]], [[COPY2]]
     ; GFX9: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_f32_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-CONTRACT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_f32_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[FMUL]], [[COPY2]]
     ; GFX9-DENORM: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_f32_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-UNSAFE: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_f32_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[FMUL]], [[COPY2]]
     ; GFX10: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_f32_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-CONTRACT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_f32_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[FMUL]], [[COPY2]]
     ; GFX10-DENORM: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_f32_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-UNSAFE: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = COPY $vgpr2
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %4:_(s32) = reassoc G_FMUL %0, %1
     %5:_(s32) = reassoc G_FADD %4, %2
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
 name:            test_f32_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_f32_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[COPY2]], [[FMUL]]
     ; GFX9: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_f32_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-CONTRACT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_f32_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[COPY2]], [[FMUL]]
     ; GFX9-DENORM: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX9-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_f32_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX9-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX9-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX9-UNSAFE: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX9-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_f32_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[COPY2]], [[FMUL]]
     ; GFX10: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_f32_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-CONTRACT: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_f32_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(s32) = reassoc G_FMUL [[COPY]], [[COPY1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(s32) = reassoc G_FADD [[COPY2]], [[FMUL]]
     ; GFX10-DENORM: $vgpr0 = COPY [[FADD]](s32)
-    ; GFX10-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_f32_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; GFX10-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-    ; GFX10-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(s32) = G_FMA [[COPY]], [[COPY1]], [[COPY2]]
     ; GFX10-UNSAFE: $vgpr0 = COPY [[FMA]](s32)
-    ; GFX10-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %0:_(s32) = COPY $vgpr0
     %1:_(s32) = COPY $vgpr1
     %2:_(s32) = COPY $vgpr2
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %4:_(s32) = reassoc G_FMUL %0, %1
     %5:_(s32) = reassoc G_FADD %2, %4
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
 name:            test_half_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_half_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -207,13 +171,11 @@ body:             |
     ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_half_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -221,12 +183,10 @@ body:             |
     ; GFX9-CONTRACT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-CONTRACT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-CONTRACT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_half_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -234,13 +194,11 @@ body:             |
     ; GFX9-DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX9-DENORM: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9-DENORM: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_half_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -248,12 +206,10 @@ body:             |
     ; GFX9-UNSAFE: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-UNSAFE: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-UNSAFE: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_half_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -261,13 +217,11 @@ body:             |
     ; GFX10: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX10: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_half_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -275,12 +229,10 @@ body:             |
     ; GFX10-CONTRACT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-CONTRACT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-CONTRACT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_half_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -288,13 +240,11 @@ body:             |
     ; GFX10-DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[FMUL]], [[TRUNC2]]
     ; GFX10-DENORM: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10-DENORM: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_half_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -302,32 +252,28 @@ body:             |
     ; GFX10-UNSAFE: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-UNSAFE: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-UNSAFE: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %4:_(s32) = COPY $vgpr0
     %0:_(s16) = G_TRUNC %4(s32)
     %5:_(s32) = COPY $vgpr1
     %1:_(s16) = G_TRUNC %5(s32)
     %6:_(s32) = COPY $vgpr2
     %2:_(s16) = G_TRUNC %6(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %7:_(s16) = reassoc G_FMUL %0, %1
     %8:_(s16) = reassoc G_FADD %7, %2
     %10:_(s32) = G_ANYEXT %8(s16)
     $vgpr0 = COPY %10(s32)
-    %9:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %9, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
 name:            test_half_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     ; GFX9-LABEL: name: test_half_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -336,13 +282,11 @@ body:             |
     ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-CONTRACT-LABEL: name: test_half_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -350,12 +294,10 @@ body:             |
     ; GFX9-CONTRACT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-CONTRACT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-CONTRACT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-DENORM-LABEL: name: test_half_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -363,13 +305,11 @@ body:             |
     ; GFX9-DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX9-DENORM: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX9-DENORM: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX9-UNSAFE-LABEL: name: test_half_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -377,12 +317,10 @@ body:             |
     ; GFX9-UNSAFE: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX9-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX9-UNSAFE: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX9-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX9-UNSAFE: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX9-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-LABEL: name: test_half_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -390,13 +328,11 @@ body:             |
     ; GFX10: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX10: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-CONTRACT-LABEL: name: test_half_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -404,12 +340,10 @@ body:             |
     ; GFX10-CONTRACT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-CONTRACT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-CONTRACT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-CONTRACT: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-CONTRACT: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-CONTRACT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-DENORM-LABEL: name: test_half_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -417,13 +351,11 @@ body:             |
     ; GFX10-DENORM: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-DENORM: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-DENORM: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-DENORM: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(s16) = reassoc G_FMUL [[TRUNC]], [[TRUNC1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(s16) = reassoc G_FADD [[TRUNC2]], [[FMUL]]
     ; GFX10-DENORM: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FADD]](s16)
     ; GFX10-DENORM: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-DENORM: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     ; GFX10-UNSAFE-LABEL: name: test_half_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -431,32 +363,28 @@ body:             |
     ; GFX10-UNSAFE: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX10-UNSAFE: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
     ; GFX10-UNSAFE: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-    ; GFX10-UNSAFE: [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(s16) = G_FMA [[TRUNC]], [[TRUNC1]], [[TRUNC2]]
     ; GFX10-UNSAFE: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[FMA]](s16)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[ANYEXT]](s32)
-    ; GFX10-UNSAFE: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
     %4:_(s32) = COPY $vgpr0
     %0:_(s16) = G_TRUNC %4(s32)
     %5:_(s32) = COPY $vgpr1
     %1:_(s16) = G_TRUNC %5(s32)
     %6:_(s32) = COPY $vgpr2
     %2:_(s16) = G_TRUNC %6(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %7:_(s16) = reassoc G_FMUL %0, %1
     %8:_(s16) = reassoc G_FADD %2, %7
     %10:_(s32) = G_ANYEXT %8(s16)
     $vgpr0 = COPY %10(s32)
-    %9:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %9, implicit $vgpr0
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
 name:            test_double_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_double_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -468,14 +396,12 @@ body:             |
     ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[FMUL]], [[MV2]]
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9: $vgpr0 = COPY [[UV]](s32)
     ; GFX9: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_double_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -486,13 +412,11 @@ body:             |
     ; GFX9-CONTRACT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-CONTRACT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-CONTRACT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_double_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -503,14 +427,12 @@ body:             |
     ; GFX9-DENORM: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-DENORM: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-DENORM: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[FMUL]], [[MV2]]
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9-DENORM: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-DENORM: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_double_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -521,13 +443,11 @@ body:             |
     ; GFX9-UNSAFE: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-UNSAFE: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-UNSAFE: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_double_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -538,14 +458,12 @@ body:             |
     ; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[FMUL]], [[MV2]]
     ; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10: $vgpr0 = COPY [[UV]](s32)
     ; GFX10: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_double_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -556,13 +474,11 @@ body:             |
     ; GFX10-CONTRACT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-CONTRACT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-CONTRACT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_double_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -573,14 +489,12 @@ body:             |
     ; GFX10-DENORM: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-DENORM: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-DENORM: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[FMUL]], [[MV2]]
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10-DENORM: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-DENORM: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_double_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -591,13 +505,11 @@ body:             |
     ; GFX10-UNSAFE: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-UNSAFE: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-UNSAFE: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %0:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
@@ -607,21 +519,19 @@ body:             |
     %8:_(s32) = COPY $vgpr4
     %9:_(s32) = COPY $vgpr5
     %2:_(s64) = G_MERGE_VALUES %8(s32), %9(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %10:_(s64) = reassoc G_FMUL %0, %1
     %11:_(s64) = reassoc G_FADD %10, %2
     %13:_(s32), %14:_(s32) = G_UNMERGE_VALUES %11(s64)
     $vgpr0 = COPY %13(s32)
     $vgpr1 = COPY %14(s32)
-    %12:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_double_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_double_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -633,14 +543,12 @@ body:             |
     ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[MV2]], [[FMUL]]
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9: $vgpr0 = COPY [[UV]](s32)
     ; GFX9: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_double_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -651,13 +559,11 @@ body:             |
     ; GFX9-CONTRACT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-CONTRACT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-CONTRACT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_double_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -668,14 +574,12 @@ body:             |
     ; GFX9-DENORM: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-DENORM: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-DENORM: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[MV2]], [[FMUL]]
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX9-DENORM: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-DENORM: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_double_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -686,13 +590,11 @@ body:             |
     ; GFX9-UNSAFE: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX9-UNSAFE: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX9-UNSAFE: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX9-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX9-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_double_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -703,14 +605,12 @@ body:             |
     ; GFX10: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[MV2]], [[FMUL]]
     ; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10: $vgpr0 = COPY [[UV]](s32)
     ; GFX10: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_double_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -721,13 +621,11 @@ body:             |
     ; GFX10-CONTRACT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-CONTRACT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-CONTRACT: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_double_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -738,14 +636,12 @@ body:             |
     ; GFX10-DENORM: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-DENORM: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-DENORM: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(s64) = reassoc G_FMUL [[MV]], [[MV1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(s64) = reassoc G_FADD [[MV2]], [[FMUL]]
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](s64)
     ; GFX10-DENORM: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-DENORM: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_double_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -756,13 +652,11 @@ body:             |
     ; GFX10-UNSAFE: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX10-UNSAFE: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
     ; GFX10-UNSAFE: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-    ; GFX10-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(s64) = G_FMA [[MV]], [[MV1]], [[MV2]]
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](s64)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
-    ; GFX10-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %0:_(s64) = G_MERGE_VALUES %4(s32), %5(s32)
@@ -772,21 +666,19 @@ body:             |
     %8:_(s32) = COPY $vgpr4
     %9:_(s32) = COPY $vgpr5
     %2:_(s64) = G_MERGE_VALUES %8(s32), %9(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %10:_(s64) = reassoc G_FMUL %0, %1
     %11:_(s64) = reassoc G_FADD %2, %10
     %13:_(s32), %14:_(s32) = G_UNMERGE_VALUES %11(s64)
     $vgpr0 = COPY %13(s32)
     $vgpr1 = COPY %14(s32)
-    %12:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_4xfloat_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11
 
     ; GFX9-LABEL: name: test_4xfloat_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -804,7 +696,6 @@ body:             |
     ; GFX9: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(<4 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(<4 x s32>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -812,8 +703,7 @@ body:             |
     ; GFX9: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX9-CONTRACT-LABEL: name: test_4xfloat_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -830,15 +720,13 @@ body:             |
     ; GFX9-CONTRACT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9-CONTRACT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9-CONTRACT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(<4 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s32>)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-CONTRACT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9-CONTRACT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9-CONTRACT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX9-DENORM-LABEL: name: test_4xfloat_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -855,7 +743,6 @@ body:             |
     ; GFX9-DENORM: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9-DENORM: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9-DENORM: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(<4 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(<4 x s32>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -863,8 +750,7 @@ body:             |
     ; GFX9-DENORM: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-DENORM: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9-DENORM: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9-DENORM: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX9-UNSAFE-LABEL: name: test_4xfloat_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -881,15 +767,13 @@ body:             |
     ; GFX9-UNSAFE: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX9-UNSAFE: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX9-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX9-UNSAFE: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(<4 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s32>)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-UNSAFE: $vgpr2 = COPY [[UV2]](s32)
     ; GFX9-UNSAFE: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX9-UNSAFE: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-LABEL: name: test_4xfloat_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -906,7 +790,6 @@ body:             |
     ; GFX10: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(<4 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(<4 x s32>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -914,8 +797,7 @@ body:             |
     ; GFX10: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-CONTRACT-LABEL: name: test_4xfloat_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -932,15 +814,13 @@ body:             |
     ; GFX10-CONTRACT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10-CONTRACT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10-CONTRACT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(<4 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s32>)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-CONTRACT: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10-CONTRACT: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10-CONTRACT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-DENORM-LABEL: name: test_4xfloat_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -957,7 +837,6 @@ body:             |
     ; GFX10-DENORM: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10-DENORM: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10-DENORM: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(<4 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(<4 x s32>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s32>)
@@ -965,8 +844,7 @@ body:             |
     ; GFX10-DENORM: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-DENORM: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10-DENORM: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10-DENORM: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     ; GFX10-UNSAFE-LABEL: name: test_4xfloat_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -983,15 +861,13 @@ body:             |
     ; GFX10-UNSAFE: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX10-UNSAFE: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
     ; GFX10-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32)
-    ; GFX10-UNSAFE: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(<4 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s32>)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-UNSAFE: $vgpr2 = COPY [[UV2]](s32)
     ; GFX10-UNSAFE: $vgpr3 = COPY [[UV3]](s32)
-    ; GFX10-UNSAFE: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -1007,7 +883,6 @@ body:             |
     %14:_(s32) = COPY $vgpr10
     %15:_(s32) = COPY $vgpr11
     %2:_(<4 x s32>) = G_BUILD_VECTOR %12(s32), %13(s32), %14(s32), %15(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %16:_(<4 x s32>) = reassoc G_FMUL %0, %1
     %17:_(<4 x s32>) = reassoc G_FADD %16, %2
     %19:_(s32), %20:_(s32), %21:_(s32), %22:_(s32) = G_UNMERGE_VALUES %17(<4 x s32>)
@@ -1015,15 +890,14 @@ body:             |
     $vgpr1 = COPY %20(s32)
     $vgpr2 = COPY %21(s32)
     $vgpr3 = COPY %22(s32)
-    %18:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %18, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
 ...
 
 ---
 name:            test_3xfloat_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8
 
     ; GFX9-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -1038,15 +912,13 @@ body:             |
     ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(<3 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(<3 x s32>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX9: $vgpr0 = COPY [[UV]](s32)
     ; GFX9: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX9-CONTRACT-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1060,14 +932,12 @@ body:             |
     ; GFX9-CONTRACT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9-CONTRACT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9-CONTRACT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(<3 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s32>)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-CONTRACT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9-CONTRACT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX9-DENORM-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1081,15 +951,13 @@ body:             |
     ; GFX9-DENORM: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9-DENORM: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9-DENORM: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(<3 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(<3 x s32>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX9-DENORM: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-DENORM: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-DENORM: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9-DENORM: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX9-UNSAFE-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1103,14 +971,12 @@ body:             |
     ; GFX9-UNSAFE: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX9-UNSAFE: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX9-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX9-UNSAFE: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(<3 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s32>)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX9-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
     ; GFX9-UNSAFE: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX9-UNSAFE: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1124,15 +990,13 @@ body:             |
     ; GFX10: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(<3 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(<3 x s32>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX10: $vgpr0 = COPY [[UV]](s32)
     ; GFX10: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-CONTRACT-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1146,14 +1010,12 @@ body:             |
     ; GFX10-CONTRACT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10-CONTRACT: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10-CONTRACT: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(<3 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s32>)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-CONTRACT: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-CONTRACT: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10-CONTRACT: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-DENORM-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1167,15 +1029,13 @@ body:             |
     ; GFX10-DENORM: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10-DENORM: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10-DENORM: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(<3 x s32>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(<3 x s32>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s32>)
     ; GFX10-DENORM: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-DENORM: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-DENORM: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10-DENORM: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     ; GFX10-UNSAFE-LABEL: name: test_3xfloat_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1189,14 +1049,12 @@ body:             |
     ; GFX10-UNSAFE: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
     ; GFX10-UNSAFE: [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr8
     ; GFX10-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32)
-    ; GFX10-UNSAFE: [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(<3 x s32>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s32>)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](s32)
     ; GFX10-UNSAFE: $vgpr1 = COPY [[UV1]](s32)
     ; GFX10-UNSAFE: $vgpr2 = COPY [[UV2]](s32)
-    ; GFX10-UNSAFE: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -1209,22 +1067,20 @@ body:             |
     %11:_(s32) = COPY $vgpr7
     %12:_(s32) = COPY $vgpr8
     %2:_(<3 x s32>) = G_BUILD_VECTOR %10(s32), %11(s32), %12(s32)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %13:_(<3 x s32>) = reassoc G_FMUL %0, %1
     %14:_(<3 x s32>) = reassoc G_FADD %2, %13
     %16:_(s32), %17:_(s32), %18:_(s32) = G_UNMERGE_VALUES %14(<3 x s32>)
     $vgpr0 = COPY %16(s32)
     $vgpr1 = COPY %17(s32)
     $vgpr2 = COPY %18(s32)
-    %15:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %15, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
 ...
 
 ---
 name:            test_4xhalf_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_4xhalf_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
@@ -1236,14 +1092,12 @@ body:             |
     ; GFX9: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(<4 x s16>) = reassoc G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(<4 x s16>) = reassoc G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX9: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_4xhalf_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1254,13 +1108,11 @@ body:             |
     ; GFX9-CONTRACT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-CONTRACT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9-CONTRACT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(<4 x s16>) = G_FMA [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]], [[CONCAT_VECTORS2]]
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FMA]](<4 x s16>)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-CONTRACT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_4xhalf_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1271,14 +1123,12 @@ body:             |
     ; GFX9-DENORM: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-DENORM: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9-DENORM: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(<4 x s16>) = reassoc G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(<4 x s16>) = reassoc G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX9-DENORM: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-DENORM: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_4xhalf_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1289,13 +1139,11 @@ body:             |
     ; GFX9-UNSAFE: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-UNSAFE: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX9-UNSAFE: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX9-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(<4 x s16>) = G_FMA [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]], [[CONCAT_VECTORS2]]
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FMA]](<4 x s16>)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-UNSAFE: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_4xhalf_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1306,14 +1154,12 @@ body:             |
     ; GFX10: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(<4 x s16>) = reassoc G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(<4 x s16>) = reassoc G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX10: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX10: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_4xhalf_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1324,13 +1170,11 @@ body:             |
     ; GFX10-CONTRACT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10-CONTRACT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10-CONTRACT: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(<4 x s16>) = G_FMA [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]], [[CONCAT_VECTORS2]]
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FMA]](<4 x s16>)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-CONTRACT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_4xhalf_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1341,14 +1185,12 @@ body:             |
     ; GFX10-DENORM: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10-DENORM: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10-DENORM: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(<4 x s16>) = reassoc G_FMUL [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(<4 x s16>) = reassoc G_FADD [[FMUL]], [[CONCAT_VECTORS2]]
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FADD]](<4 x s16>)
     ; GFX10-DENORM: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-DENORM: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_4xhalf_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1359,13 +1201,11 @@ body:             |
     ; GFX10-UNSAFE: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX10-UNSAFE: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
     ; GFX10-UNSAFE: [[CONCAT_VECTORS2:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>)
-    ; GFX10-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(<4 x s16>) = G_FMA [[CONCAT_VECTORS]], [[CONCAT_VECTORS1]], [[CONCAT_VECTORS2]]
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[FMA]](<4 x s16>)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-UNSAFE: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(<2 x s16>) = COPY $vgpr0
     %5:_(<2 x s16>) = COPY $vgpr1
     %0:_(<4 x s16>) = G_CONCAT_VECTORS %4(<2 x s16>), %5(<2 x s16>)
@@ -1375,21 +1215,19 @@ body:             |
     %8:_(<2 x s16>) = COPY $vgpr4
     %9:_(<2 x s16>) = COPY $vgpr5
     %2:_(<4 x s16>) = G_CONCAT_VECTORS %8(<2 x s16>), %9(<2 x s16>)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %10:_(<4 x s16>) = reassoc G_FMUL %0, %1
     %11:_(<4 x s16>) = reassoc G_FADD %10, %2
     %13:_(<2 x s16>), %14:_(<2 x s16>) = G_UNMERGE_VALUES %11(<4 x s16>)
     $vgpr0 = COPY %13(<2 x s16>)
     $vgpr1 = COPY %14(<2 x s16>)
-    %12:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_3xhalf_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX9-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
@@ -1411,7 +1249,6 @@ body:             |
     ; GFX9: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(<3 x s16>) = reassoc G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX9: [[FADD:%[0-9]+]]:_(<3 x s16>) = reassoc G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX9: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1419,8 +1256,7 @@ body:             |
     ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-CONTRACT-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1441,15 +1277,13 @@ body:             |
     ; GFX9-CONTRACT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9-CONTRACT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9-CONTRACT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(<3 x s16>) = G_FMA [[BITCAST1]], [[BITCAST3]], [[BITCAST5]]
     ; GFX9-CONTRACT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
     ; GFX9-CONTRACT: [[CONCAT_VECTORS3:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[FMA]](<3 x s16>), [[DEF1]](<3 x s16>)
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-CONTRACT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-DENORM-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1470,7 +1304,6 @@ body:             |
     ; GFX9-DENORM: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9-DENORM: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9-DENORM: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(<3 x s16>) = reassoc G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(<3 x s16>) = reassoc G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX9-DENORM: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1478,8 +1311,7 @@ body:             |
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9-DENORM: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-DENORM: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX9-UNSAFE-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1500,15 +1332,13 @@ body:             |
     ; GFX9-UNSAFE: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX9-UNSAFE: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX9-UNSAFE: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX9-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(<3 x s16>) = G_FMA [[BITCAST1]], [[BITCAST3]], [[BITCAST5]]
     ; GFX9-UNSAFE: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
     ; GFX9-UNSAFE: [[CONCAT_VECTORS3:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[FMA]](<3 x s16>), [[DEF1]](<3 x s16>)
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX9-UNSAFE: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX9-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1529,7 +1359,6 @@ body:             |
     ; GFX10: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(<3 x s16>) = reassoc G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX10: [[FADD:%[0-9]+]]:_(<3 x s16>) = reassoc G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX10: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1537,8 +1366,7 @@ body:             |
     ; GFX10: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-CONTRACT-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1559,15 +1387,13 @@ body:             |
     ; GFX10-CONTRACT: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10-CONTRACT: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10-CONTRACT: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10-CONTRACT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(<3 x s16>) = G_FMA [[BITCAST1]], [[BITCAST3]], [[BITCAST5]]
     ; GFX10-CONTRACT: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
     ; GFX10-CONTRACT: [[CONCAT_VECTORS3:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[FMA]](<3 x s16>), [[DEF1]](<3 x s16>)
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-CONTRACT: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-CONTRACT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-DENORM-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1588,7 +1414,6 @@ body:             |
     ; GFX10-DENORM: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10-DENORM: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10-DENORM: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10-DENORM: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(<3 x s16>) = reassoc G_FMUL [[BITCAST1]], [[BITCAST3]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(<3 x s16>) = reassoc G_FADD [[BITCAST5]], [[FMUL]]
     ; GFX10-DENORM: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1596,8 +1421,7 @@ body:             |
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10-DENORM: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-DENORM: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-DENORM: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     ; GFX10-UNSAFE-LABEL: name: test_3xhalf_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1618,15 +1442,13 @@ body:             |
     ; GFX10-UNSAFE: [[BITCAST4:%[0-9]+]]:_(s96) = G_BITCAST [[CONCAT_VECTORS2]](<6 x s16>)
     ; GFX10-UNSAFE: [[TRUNC2:%[0-9]+]]:_(s48) = G_TRUNC [[BITCAST4]](s96)
     ; GFX10-UNSAFE: [[BITCAST5:%[0-9]+]]:_(<3 x s16>) = G_BITCAST [[TRUNC2]](s48)
-    ; GFX10-UNSAFE: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(<3 x s16>) = G_FMA [[BITCAST1]], [[BITCAST3]], [[BITCAST5]]
     ; GFX10-UNSAFE: [[DEF1:%[0-9]+]]:_(<3 x s16>) = G_IMPLICIT_DEF
     ; GFX10-UNSAFE: [[CONCAT_VECTORS3:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[FMA]](<3 x s16>), [[DEF1]](<3 x s16>)
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>), [[UV2:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS3]](<6 x s16>)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](<2 x s16>)
     ; GFX10-UNSAFE: $vgpr1 = COPY [[UV1]](<2 x s16>)
-    ; GFX10-UNSAFE: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %4:_(<2 x s16>) = COPY $vgpr0
     %5:_(<2 x s16>) = COPY $vgpr1
     %10:_(<2 x s16>) = G_IMPLICIT_DEF
@@ -1640,7 +1462,6 @@ body:             |
     %9:_(<2 x s16>) = COPY $vgpr5
     %15:_(<6 x s16>) = G_CONCAT_VECTORS %8(<2 x s16>), %9(<2 x s16>), %10(<2 x s16>)
     %2:_(<3 x s16>), %16:_(<3 x s16>) = G_UNMERGE_VALUES %15(<6 x s16>)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %17:_(<3 x s16>) = reassoc G_FMUL %0, %1
     %18:_(<3 x s16>) = reassoc G_FADD %2, %17
     %22:_(<3 x s16>) = G_IMPLICIT_DEF
@@ -1648,15 +1469,14 @@ body:             |
     %20:_(<2 x s16>), %21:_(<2 x s16>), %24:_(<2 x s16>) = G_UNMERGE_VALUES %23(<6 x s16>)
     $vgpr0 = COPY %20(<2 x s16>)
     $vgpr1 = COPY %21(<2 x s16>)
-    %19:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %19, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name:            test_4xdouble_add_mul
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23
 
     ; GFX9-LABEL: name: test_4xdouble_add_mul
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -1698,7 +1518,6 @@ body:             |
     ; GFX9: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(<4 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(<4 x s64>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -1710,8 +1529,7 @@ body:             |
     ; GFX9: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX9-CONTRACT-LABEL: name: test_4xdouble_add_mul
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1752,7 +1570,6 @@ body:             |
     ; GFX9-CONTRACT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9-CONTRACT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9-CONTRACT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(<4 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s64>)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](s32)
@@ -1763,8 +1580,7 @@ body:             |
     ; GFX9-CONTRACT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9-CONTRACT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9-CONTRACT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9-CONTRACT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX9-DENORM-LABEL: name: test_4xdouble_add_mul
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1805,7 +1621,6 @@ body:             |
     ; GFX9-DENORM: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9-DENORM: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9-DENORM: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(<4 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(<4 x s64>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -1817,8 +1632,7 @@ body:             |
     ; GFX9-DENORM: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9-DENORM: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9-DENORM: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9-DENORM: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX9-UNSAFE-LABEL: name: test_4xdouble_add_mul
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1859,7 +1673,6 @@ body:             |
     ; GFX9-UNSAFE: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX9-UNSAFE: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX9-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX9-UNSAFE: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(<4 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s64>)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](s32)
@@ -1870,8 +1683,7 @@ body:             |
     ; GFX9-UNSAFE: $vgpr5 = COPY [[UV5]](s32)
     ; GFX9-UNSAFE: $vgpr6 = COPY [[UV6]](s32)
     ; GFX9-UNSAFE: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX9-UNSAFE: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-LABEL: name: test_4xdouble_add_mul
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1912,7 +1724,6 @@ body:             |
     ; GFX10: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(<4 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(<4 x s64>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -1924,8 +1735,7 @@ body:             |
     ; GFX10: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-CONTRACT-LABEL: name: test_4xdouble_add_mul
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1966,7 +1776,6 @@ body:             |
     ; GFX10-CONTRACT: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10-CONTRACT: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10-CONTRACT: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(<4 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s64>)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](s32)
@@ -1977,8 +1786,7 @@ body:             |
     ; GFX10-CONTRACT: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10-CONTRACT: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10-CONTRACT: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10-CONTRACT: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-DENORM-LABEL: name: test_4xdouble_add_mul
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2019,7 +1827,6 @@ body:             |
     ; GFX10-DENORM: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10-DENORM: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10-DENORM: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(<4 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(<4 x s64>) = reassoc G_FADD [[FMUL]], [[BUILD_VECTOR2]]
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<4 x s64>)
@@ -2031,8 +1838,7 @@ body:             |
     ; GFX10-DENORM: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10-DENORM: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10-DENORM: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10-DENORM: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     ; GFX10-UNSAFE-LABEL: name: test_4xdouble_add_mul
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2073,7 +1879,6 @@ body:             |
     ; GFX10-UNSAFE: [[MV10:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY20]](s32), [[COPY21]](s32)
     ; GFX10-UNSAFE: [[MV11:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY22]](s32), [[COPY23]](s32)
     ; GFX10-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64)
-    ; GFX10-UNSAFE: [[COPY24:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(<4 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<4 x s64>)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](s32)
@@ -2084,8 +1889,7 @@ body:             |
     ; GFX10-UNSAFE: $vgpr5 = COPY [[UV5]](s32)
     ; GFX10-UNSAFE: $vgpr6 = COPY [[UV6]](s32)
     ; GFX10-UNSAFE: $vgpr7 = COPY [[UV7]](s32)
-    ; GFX10-UNSAFE: [[COPY25:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY24]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY25]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -2125,7 +1929,6 @@ body:             |
     %38:_(s64) = G_MERGE_VALUES %24(s32), %25(s32)
     %39:_(s64) = G_MERGE_VALUES %26(s32), %27(s32)
     %2:_(<4 x s64>) = G_BUILD_VECTOR %36(s64), %37(s64), %38(s64), %39(s64)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %40:_(<4 x s64>) = reassoc G_FMUL %0, %1
     %41:_(<4 x s64>) = reassoc G_FADD %40, %2
     %43:_(s32), %44:_(s32), %45:_(s32), %46:_(s32), %47:_(s32), %48:_(s32), %49:_(s32), %50:_(s32) = G_UNMERGE_VALUES %41(<4 x s64>)
@@ -2137,15 +1940,14 @@ body:             |
     $vgpr5 = COPY %48(s32)
     $vgpr6 = COPY %49(s32)
     $vgpr7 = COPY %50(s32)
-    %42:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %42, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
 ...
 
 ---
 name:            test_3xdouble_add_mul_rhs
 body:             |
   bb.1.entry:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17
 
     ; GFX9-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -2178,7 +1980,6 @@ body:             |
     ; GFX9: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9: [[FMUL:%[0-9]+]]:_(<3 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9: [[FADD:%[0-9]+]]:_(<3 x s64>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2188,8 +1989,7 @@ body:             |
     ; GFX9: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX9-CONTRACT-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2221,7 +2021,6 @@ body:             |
     ; GFX9-CONTRACT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9-CONTRACT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9-CONTRACT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-CONTRACT: [[FMA:%[0-9]+]]:_(<3 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s64>)
     ; GFX9-CONTRACT: $vgpr0 = COPY [[UV]](s32)
@@ -2230,8 +2029,7 @@ body:             |
     ; GFX9-CONTRACT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9-CONTRACT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9-CONTRACT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9-CONTRACT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9-CONTRACT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX9-DENORM-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2263,7 +2061,6 @@ body:             |
     ; GFX9-DENORM: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9-DENORM: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9-DENORM: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-DENORM: [[FMUL:%[0-9]+]]:_(<3 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX9-DENORM: [[FADD:%[0-9]+]]:_(<3 x s64>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX9-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2273,8 +2070,7 @@ body:             |
     ; GFX9-DENORM: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9-DENORM: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9-DENORM: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9-DENORM: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9-DENORM: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX9-UNSAFE-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX9-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX9-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2306,7 +2102,6 @@ body:             |
     ; GFX9-UNSAFE: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX9-UNSAFE: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX9-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX9-UNSAFE: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-UNSAFE: [[FMA:%[0-9]+]]:_(<3 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX9-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s64>)
     ; GFX9-UNSAFE: $vgpr0 = COPY [[UV]](s32)
@@ -2315,8 +2110,7 @@ body:             |
     ; GFX9-UNSAFE: $vgpr3 = COPY [[UV3]](s32)
     ; GFX9-UNSAFE: $vgpr4 = COPY [[UV4]](s32)
     ; GFX9-UNSAFE: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX9-UNSAFE: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX9-UNSAFE: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX9-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2348,7 +2142,6 @@ body:             |
     ; GFX10: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10: [[FMUL:%[0-9]+]]:_(<3 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10: [[FADD:%[0-9]+]]:_(<3 x s64>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2358,8 +2151,7 @@ body:             |
     ; GFX10: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-CONTRACT-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10-CONTRACT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-CONTRACT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2391,7 +2183,6 @@ body:             |
     ; GFX10-CONTRACT: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10-CONTRACT: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10-CONTRACT: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10-CONTRACT: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-CONTRACT: [[FMA:%[0-9]+]]:_(<3 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-CONTRACT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s64>)
     ; GFX10-CONTRACT: $vgpr0 = COPY [[UV]](s32)
@@ -2400,8 +2191,7 @@ body:             |
     ; GFX10-CONTRACT: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10-CONTRACT: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10-CONTRACT: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10-CONTRACT: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10-CONTRACT: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10-CONTRACT: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-DENORM-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10-DENORM: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-DENORM: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2433,7 +2223,6 @@ body:             |
     ; GFX10-DENORM: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10-DENORM: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10-DENORM: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10-DENORM: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-DENORM: [[FMUL:%[0-9]+]]:_(<3 x s64>) = reassoc G_FMUL [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; GFX10-DENORM: [[FADD:%[0-9]+]]:_(<3 x s64>) = reassoc G_FADD [[BUILD_VECTOR2]], [[FMUL]]
     ; GFX10-DENORM: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FADD]](<3 x s64>)
@@ -2443,8 +2232,7 @@ body:             |
     ; GFX10-DENORM: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10-DENORM: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10-DENORM: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10-DENORM: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10-DENORM: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10-DENORM: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     ; GFX10-UNSAFE-LABEL: name: test_3xdouble_add_mul_rhs
     ; GFX10-UNSAFE: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; GFX10-UNSAFE: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2476,7 +2264,6 @@ body:             |
     ; GFX10-UNSAFE: [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
     ; GFX10-UNSAFE: [[MV8:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY16]](s32), [[COPY17]](s32)
     ; GFX10-UNSAFE: [[BUILD_VECTOR2:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV6]](s64), [[MV7]](s64), [[MV8]](s64)
-    ; GFX10-UNSAFE: [[COPY18:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX10-UNSAFE: [[FMA:%[0-9]+]]:_(<3 x s64>) = G_FMA [[BUILD_VECTOR]], [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
     ; GFX10-UNSAFE: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[FMA]](<3 x s64>)
     ; GFX10-UNSAFE: $vgpr0 = COPY [[UV]](s32)
@@ -2485,8 +2272,7 @@ body:             |
     ; GFX10-UNSAFE: $vgpr3 = COPY [[UV3]](s32)
     ; GFX10-UNSAFE: $vgpr4 = COPY [[UV4]](s32)
     ; GFX10-UNSAFE: $vgpr5 = COPY [[UV5]](s32)
-    ; GFX10-UNSAFE: [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY18]]
-    ; GFX10-UNSAFE: S_SETPC_B64_return [[COPY19]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    ; GFX10-UNSAFE: S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -2517,7 +2303,6 @@ body:             |
     %29:_(s64) = G_MERGE_VALUES %18(s32), %19(s32)
     %30:_(s64) = G_MERGE_VALUES %20(s32), %21(s32)
     %2:_(<3 x s64>) = G_BUILD_VECTOR %28(s64), %29(s64), %30(s64)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %31:_(<3 x s64>) = reassoc G_FMUL %0, %1
     %32:_(<3 x s64>) = reassoc G_FADD %2, %31
     %34:_(s32), %35:_(s32), %36:_(s32), %37:_(s32), %38:_(s32), %39:_(s32) = G_UNMERGE_VALUES %32(<3 x s64>)
@@ -2527,6 +2312,5 @@ body:             |
     $vgpr3 = COPY %37(s32)
     $vgpr4 = COPY %38(s32)
     $vgpr5 = COPY %39(s32)
-    %33:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %33, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-imm-chain-illegal-types.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-imm-chain-illegal-types.mir
index e271229aa1fd4..dcec23c030c46 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-imm-chain-illegal-types.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-imm-chain-illegal-types.mir
@@ -283,10 +283,9 @@ body:             |
     liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
 
     ; CHECK-LABEL: name: ushlsat_i44
-    ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0, $vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-    ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s44) = G_TRUNC [[MV]](s64)
     ; CHECK: [[C:%[0-9]+]]:_(s44) = G_CONSTANT i44 22
@@ -296,11 +295,9 @@ body:             |
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
     ; CHECK: $vgpr0 = COPY [[UV]](s32)
     ; CHECK: $vgpr1 = COPY [[UV1]](s32)
-    ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-    ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0, implicit $vgpr1
+    ; CHECK: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %2:_(s32) = COPY $vgpr0
     %3:_(s32) = COPY $vgpr1
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %4:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
     %0:_(s44) = G_TRUNC %4(s64)
     %5:_(s44) = G_CONSTANT i44 22
@@ -310,8 +307,7 @@ body:             |
     %10:_(s32), %11:_(s32) = G_UNMERGE_VALUES %9(s64)
     $vgpr0 = COPY %10(s32)
     $vgpr1 = COPY %11(s32)
-    %8:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %8, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 
 ...
 ---
@@ -319,13 +315,12 @@ name:            ushlsat_i55
 tracksRegLiveness: true
 body:             |
   bb.0:
-    liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: ushlsat_i55
-    ; CHECK: liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0, $vgpr1
     ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-    ; CHECK: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
     ; CHECK: [[TRUNC:%[0-9]+]]:_(s55) = G_TRUNC [[MV]](s64)
     ; CHECK: [[C:%[0-9]+]]:_(s55) = G_CONSTANT i55 53
@@ -334,11 +329,9 @@ body:             |
     ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
     ; CHECK: $vgpr0 = COPY [[UV]](s32)
     ; CHECK: $vgpr1 = COPY [[UV1]](s32)
-    ; CHECK: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-    ; CHECK: S_SETPC_B64_return [[COPY3]], implicit $vgpr0, implicit $vgpr1
+    ; CHECK: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
     %2:_(s32) = COPY $vgpr0
     %3:_(s32) = COPY $vgpr1
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %4:_(s64) = G_MERGE_VALUES %2(s32), %3(s32)
     %0:_(s55) = G_TRUNC %4(s64)
     %5:_(s55) = G_CONSTANT i55 50
@@ -349,7 +342,6 @@ body:             |
     %11:_(s32), %12:_(s32) = G_UNMERGE_VALUES %10(s64)
     $vgpr0 = COPY %11(s32)
     $vgpr1 = COPY %12(s32)
-    %9:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %9, implicit $vgpr0, implicit $vgpr1
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0, implicit $vgpr1
 
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/dropped_debug_info_assert.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/dropped_debug_info_assert.ll
index 6125f13348b14..f9fe75c612b62 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/dropped_debug_info_assert.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/dropped_debug_info_assert.ll
@@ -58,15 +58,13 @@ define void @returnaddress_debug_loc(i8* addrspace(1)* %ptr) {
   ; CHECK: bb.1.entry:
   ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY]], %subreg.sub0, [[COPY1]], %subreg.sub1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sreg_64 = COPY [[COPY2]], debug-location !6
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vreg_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE]], [[COPY4]], 0, 0, implicit $exec :: (store (p0) into %ir.ptr, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31, debug-location !6
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+  ; CHECK-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY1]], %subreg.sub0, [[COPY2]], %subreg.sub1
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vreg_64 = COPY [[COPY]]
+  ; CHECK-NEXT:   GLOBAL_STORE_DWORDX2 [[REG_SEQUENCE]], [[COPY3]], 0, 0, implicit $exec :: (store (p0) into %ir.ptr, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
 entry:
   %returnaddr = call i8* @llvm.returnaddress(i32 0), !dbg !6
   store i8* %returnaddr, i8* addrspace(1)* %ptr, align 8

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/dummy-target.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/dummy-target.ll
index 6d332acd7e89f..5dfde116785db 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/dummy-target.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/dummy-target.ll
@@ -6,16 +6,14 @@
 define i16 @vop3p_add_i16(i16 %arg0) #0 {
   ; CHECK-LABEL: name: vop3p_add_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[TRUNC]]
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %add = add i16 %arg0, %arg0
   ret i16 %add
 }
@@ -23,10 +21,9 @@ define i16 @vop3p_add_i16(i16 %arg0) #0 {
 define <2 x i16> @vop3p_add_v2i16(<2 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: vop3p_add_v2i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -44,8 +41,7 @@ define <2 x i16> @vop3p_add_v2i16(<2 x i16> %arg0) #0 {
   ; CHECK-NEXT:   [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
   ; CHECK-NEXT:   [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
   ; CHECK-NEXT:   $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %add = add <2 x i16> %arg0, %arg0
   ret <2 x i16> %add
 }
@@ -53,14 +49,12 @@ define <2 x i16> @vop3p_add_v2i16(<2 x i16> %arg0) #0 {
 define i16 @halfinsts_add_i16(i16 %arg0) #1 {
   ; CHECK-LABEL: name: halfinsts_add_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[ADD]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %add = add i16 %arg0, %arg0
   ret i16 %add
 }
@@ -68,17 +62,15 @@ define i16 @halfinsts_add_i16(i16 %arg0) #1 {
 define <2 x i16> @halfinsts_add_v2i16(<2 x i16> %arg0) #1 {
   ; CHECK-LABEL: name: halfinsts_add_v2i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
   ; CHECK-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[ADD]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[ADD1]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %add = add <2 x i16> %arg0, %arg0
   ret <2 x i16> %add
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f32.ll
index 7cd475bf00987..140c3ff694079 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f32.ll
@@ -667,8 +667,8 @@ define <2 x float> @v_fdiv_v2f32(<2 x float> %a, <2 x float> %b) {
 ; GFX89-IEEE-NEXT:    v_fma_f32 v10, v12, v8, v10
 ; GFX89-IEEE-NEXT:    v_fma_f32 v11, v13, v9, v11
 ; GFX89-IEEE-NEXT:    v_fma_f32 v4, -v4, v10, v6
-; GFX89-IEEE-NEXT:    v_div_fmas_f32 v4, v4, v8, v10
 ; GFX89-IEEE-NEXT:    v_fma_f32 v5, -v5, v11, v7
+; GFX89-IEEE-NEXT:    v_div_fmas_f32 v4, v4, v8, v10
 ; GFX89-IEEE-NEXT:    s_mov_b64 vcc, s[4:5]
 ; GFX89-IEEE-NEXT:    v_div_fmas_f32 v5, v5, v9, v11
 ; GFX89-IEEE-NEXT:    v_div_fixup_f32 v0, v4, v2, v0
@@ -860,8 +860,8 @@ define <2 x float> @v_fdiv_v2f32_ulp25(<2 x float> %a, <2 x float> %b) {
 ; GFX89-IEEE-NEXT:    v_fma_f32 v10, v12, v8, v10
 ; GFX89-IEEE-NEXT:    v_fma_f32 v11, v13, v9, v11
 ; GFX89-IEEE-NEXT:    v_fma_f32 v4, -v4, v10, v6
-; GFX89-IEEE-NEXT:    v_div_fmas_f32 v4, v4, v8, v10
 ; GFX89-IEEE-NEXT:    v_fma_f32 v5, -v5, v11, v7
+; GFX89-IEEE-NEXT:    v_div_fmas_f32 v4, v4, v8, v10
 ; GFX89-IEEE-NEXT:    s_mov_b64 vcc, s[4:5]
 ; GFX89-IEEE-NEXT:    v_div_fmas_f32 v5, v5, v9, v11
 ; GFX89-IEEE-NEXT:    v_div_fixup_f32 v0, v4, v2, v0
@@ -1451,8 +1451,8 @@ define <2 x float> @v_fdiv_v2f32_arcp_ulp25(<2 x float> %a, <2 x float> %b) {
 ; GFX89-IEEE-NEXT:    v_fma_f32 v10, v12, v8, v10
 ; GFX89-IEEE-NEXT:    v_fma_f32 v11, v13, v9, v11
 ; GFX89-IEEE-NEXT:    v_fma_f32 v4, -v4, v10, v6
-; GFX89-IEEE-NEXT:    v_div_fmas_f32 v4, v4, v8, v10
 ; GFX89-IEEE-NEXT:    v_fma_f32 v5, -v5, v11, v7
+; GFX89-IEEE-NEXT:    v_div_fmas_f32 v4, v4, v8, v10
 ; GFX89-IEEE-NEXT:    s_mov_b64 vcc, s[4:5]
 ; GFX89-IEEE-NEXT:    v_div_fmas_f32 v5, v5, v9, v11
 ; GFX89-IEEE-NEXT:    v_div_fixup_f32 v0, v4, v2, v0

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f64.ll
index 5431744849247..39153cb8a744e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fdiv.f64.ll
@@ -837,33 +837,34 @@ define <2 x double> @v_rcp_v2f64(<2 x double> %x) {
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT:    v_div_scale_f64 v[4:5], s[4:5], v[0:1], v[0:1], 1.0
-; GFX6-NEXT:    v_div_scale_f64 v[10:11], s[4:5], 1.0, v[0:1], 1.0
+; GFX6-NEXT:    v_div_scale_f64 v[10:11], s[4:5], v[2:3], v[2:3], 1.0
 ; GFX6-NEXT:    v_rcp_f64_e32 v[6:7], v[4:5]
 ; GFX6-NEXT:    v_mov_b32_e32 v18, 0x3ff00000
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v11
+; GFX6-NEXT:    v_rcp_f64_e32 v[14:15], v[10:11]
 ; GFX6-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
+; GFX6-NEXT:    v_fma_f64 v[16:17], -v[10:11], v[14:15], 1.0
 ; GFX6-NEXT:    v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
-; GFX6-NEXT:    v_div_scale_f64 v[8:9], s[4:5], v[2:3], v[2:3], 1.0
+; GFX6-NEXT:    v_div_scale_f64 v[8:9], s[4:5], 1.0, v[0:1], 1.0
 ; GFX6-NEXT:    v_fma_f64 v[12:13], -v[4:5], v[6:7], 1.0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v1, v5
+; GFX6-NEXT:    v_fma_f64 v[14:15], v[14:15], v[16:17], v[14:15]
 ; GFX6-NEXT:    v_fma_f64 v[6:7], v[6:7], v[12:13], v[6:7]
-; GFX6-NEXT:    v_rcp_f64_e32 v[12:13], v[8:9]
-; GFX6-NEXT:    v_mul_f64 v[14:15], v[10:11], v[6:7]
-; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT:    v_fma_f64 v[10:11], -v[4:5], v[14:15], v[10:11]
-; GFX6-NEXT:    v_fma_f64 v[16:17], -v[8:9], v[12:13], 1.0
-; GFX6-NEXT:    v_div_fmas_f64 v[6:7], v[10:11], v[6:7], v[14:15]
-; GFX6-NEXT:    v_fma_f64 v[12:13], v[12:13], v[16:17], v[12:13]
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v9
+; GFX6-NEXT:    v_mul_f64 v[12:13], v[8:9], v[6:7]
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v1, v5
+; GFX6-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[12:13], v[8:9]
+; GFX6-NEXT:    v_fma_f64 v[4:5], -v[10:11], v[14:15], 1.0
 ; GFX6-NEXT:    v_div_scale_f64 v[16:17], s[6:7], 1.0, v[2:3], 1.0
-; GFX6-NEXT:    v_fma_f64 v[4:5], -v[8:9], v[12:13], 1.0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v3, v9
-; GFX6-NEXT:    v_fma_f64 v[4:5], v[12:13], v[4:5], v[12:13]
+; GFX6-NEXT:    v_fma_f64 v[4:5], v[14:15], v[4:5], v[14:15]
+; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
+; GFX6-NEXT:    v_mul_f64 v[14:15], v[16:17], v[4:5]
+; GFX6-NEXT:    v_div_fmas_f64 v[6:7], v[8:9], v[6:7], v[12:13]
+; GFX6-NEXT:    v_fma_f64 v[8:9], -v[10:11], v[14:15], v[16:17]
 ; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v17
-; GFX6-NEXT:    v_mul_f64 v[12:13], v[16:17], v[4:5]
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v3, v11
 ; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT:    v_fma_f64 v[10:11], -v[8:9], v[12:13], v[16:17]
 ; GFX6-NEXT:    v_div_fixup_f64 v[0:1], v[6:7], v[0:1], 1.0
-; GFX6-NEXT:    v_div_fmas_f64 v[4:5], v[10:11], v[4:5], v[12:13]
+; GFX6-NEXT:    s_nop 0
+; GFX6-NEXT:    v_div_fmas_f64 v[4:5], v[8:9], v[4:5], v[14:15]
 ; GFX6-NEXT:    v_div_fixup_f64 v[2:3], v[4:5], v[2:3], 1.0
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -960,33 +961,34 @@ define <2 x double> @v_rcp_v2f64_arcp(<2 x double> %x) {
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT:    v_div_scale_f64 v[4:5], s[4:5], v[0:1], v[0:1], 1.0
-; GFX6-NEXT:    v_div_scale_f64 v[10:11], s[4:5], 1.0, v[0:1], 1.0
+; GFX6-NEXT:    v_div_scale_f64 v[10:11], s[4:5], v[2:3], v[2:3], 1.0
 ; GFX6-NEXT:    v_rcp_f64_e32 v[6:7], v[4:5]
 ; GFX6-NEXT:    v_mov_b32_e32 v18, 0x3ff00000
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v11
+; GFX6-NEXT:    v_rcp_f64_e32 v[14:15], v[10:11]
 ; GFX6-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
+; GFX6-NEXT:    v_fma_f64 v[16:17], -v[10:11], v[14:15], 1.0
 ; GFX6-NEXT:    v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
-; GFX6-NEXT:    v_div_scale_f64 v[8:9], s[4:5], v[2:3], v[2:3], 1.0
+; GFX6-NEXT:    v_div_scale_f64 v[8:9], s[4:5], 1.0, v[0:1], 1.0
 ; GFX6-NEXT:    v_fma_f64 v[12:13], -v[4:5], v[6:7], 1.0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v1, v5
+; GFX6-NEXT:    v_fma_f64 v[14:15], v[14:15], v[16:17], v[14:15]
 ; GFX6-NEXT:    v_fma_f64 v[6:7], v[6:7], v[12:13], v[6:7]
-; GFX6-NEXT:    v_rcp_f64_e32 v[12:13], v[8:9]
-; GFX6-NEXT:    v_mul_f64 v[14:15], v[10:11], v[6:7]
-; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT:    v_fma_f64 v[10:11], -v[4:5], v[14:15], v[10:11]
-; GFX6-NEXT:    v_fma_f64 v[16:17], -v[8:9], v[12:13], 1.0
-; GFX6-NEXT:    v_div_fmas_f64 v[6:7], v[10:11], v[6:7], v[14:15]
-; GFX6-NEXT:    v_fma_f64 v[12:13], v[12:13], v[16:17], v[12:13]
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v9
+; GFX6-NEXT:    v_mul_f64 v[12:13], v[8:9], v[6:7]
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v1, v5
+; GFX6-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[12:13], v[8:9]
+; GFX6-NEXT:    v_fma_f64 v[4:5], -v[10:11], v[14:15], 1.0
 ; GFX6-NEXT:    v_div_scale_f64 v[16:17], s[6:7], 1.0, v[2:3], 1.0
-; GFX6-NEXT:    v_fma_f64 v[4:5], -v[8:9], v[12:13], 1.0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v3, v9
-; GFX6-NEXT:    v_fma_f64 v[4:5], v[12:13], v[4:5], v[12:13]
+; GFX6-NEXT:    v_fma_f64 v[4:5], v[14:15], v[4:5], v[14:15]
+; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
+; GFX6-NEXT:    v_mul_f64 v[14:15], v[16:17], v[4:5]
+; GFX6-NEXT:    v_div_fmas_f64 v[6:7], v[8:9], v[6:7], v[12:13]
+; GFX6-NEXT:    v_fma_f64 v[8:9], -v[10:11], v[14:15], v[16:17]
 ; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v17
-; GFX6-NEXT:    v_mul_f64 v[12:13], v[16:17], v[4:5]
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v3, v11
 ; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT:    v_fma_f64 v[10:11], -v[8:9], v[12:13], v[16:17]
 ; GFX6-NEXT:    v_div_fixup_f64 v[0:1], v[6:7], v[0:1], 1.0
-; GFX6-NEXT:    v_div_fmas_f64 v[4:5], v[10:11], v[4:5], v[12:13]
+; GFX6-NEXT:    s_nop 0
+; GFX6-NEXT:    v_div_fmas_f64 v[4:5], v[8:9], v[4:5], v[14:15]
 ; GFX6-NEXT:    v_div_fixup_f64 v[2:3], v[4:5], v[2:3], 1.0
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -1130,33 +1132,34 @@ define <2 x double> @v_rcp_v2f64_ulp25(<2 x double> %x) {
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX6-NEXT:    v_div_scale_f64 v[4:5], s[4:5], v[0:1], v[0:1], 1.0
-; GFX6-NEXT:    v_div_scale_f64 v[10:11], s[4:5], 1.0, v[0:1], 1.0
+; GFX6-NEXT:    v_div_scale_f64 v[10:11], s[4:5], v[2:3], v[2:3], 1.0
 ; GFX6-NEXT:    v_rcp_f64_e32 v[6:7], v[4:5]
 ; GFX6-NEXT:    v_mov_b32_e32 v18, 0x3ff00000
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v11
+; GFX6-NEXT:    v_rcp_f64_e32 v[14:15], v[10:11]
 ; GFX6-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
+; GFX6-NEXT:    v_fma_f64 v[16:17], -v[10:11], v[14:15], 1.0
 ; GFX6-NEXT:    v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
-; GFX6-NEXT:    v_div_scale_f64 v[8:9], s[4:5], v[2:3], v[2:3], 1.0
+; GFX6-NEXT:    v_div_scale_f64 v[8:9], s[4:5], 1.0, v[0:1], 1.0
 ; GFX6-NEXT:    v_fma_f64 v[12:13], -v[4:5], v[6:7], 1.0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v1, v5
+; GFX6-NEXT:    v_fma_f64 v[14:15], v[14:15], v[16:17], v[14:15]
 ; GFX6-NEXT:    v_fma_f64 v[6:7], v[6:7], v[12:13], v[6:7]
-; GFX6-NEXT:    v_rcp_f64_e32 v[12:13], v[8:9]
-; GFX6-NEXT:    v_mul_f64 v[14:15], v[10:11], v[6:7]
-; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT:    v_fma_f64 v[10:11], -v[4:5], v[14:15], v[10:11]
-; GFX6-NEXT:    v_fma_f64 v[16:17], -v[8:9], v[12:13], 1.0
-; GFX6-NEXT:    v_div_fmas_f64 v[6:7], v[10:11], v[6:7], v[14:15]
-; GFX6-NEXT:    v_fma_f64 v[12:13], v[12:13], v[16:17], v[12:13]
+; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v9
+; GFX6-NEXT:    v_mul_f64 v[12:13], v[8:9], v[6:7]
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v1, v5
+; GFX6-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[12:13], v[8:9]
+; GFX6-NEXT:    v_fma_f64 v[4:5], -v[10:11], v[14:15], 1.0
 ; GFX6-NEXT:    v_div_scale_f64 v[16:17], s[6:7], 1.0, v[2:3], 1.0
-; GFX6-NEXT:    v_fma_f64 v[4:5], -v[8:9], v[12:13], 1.0
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v3, v9
-; GFX6-NEXT:    v_fma_f64 v[4:5], v[12:13], v[4:5], v[12:13]
+; GFX6-NEXT:    v_fma_f64 v[4:5], v[14:15], v[4:5], v[14:15]
+; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
+; GFX6-NEXT:    v_mul_f64 v[14:15], v[16:17], v[4:5]
+; GFX6-NEXT:    v_div_fmas_f64 v[6:7], v[8:9], v[6:7], v[12:13]
+; GFX6-NEXT:    v_fma_f64 v[8:9], -v[10:11], v[14:15], v[16:17]
 ; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, v18, v17
-; GFX6-NEXT:    v_mul_f64 v[12:13], v[16:17], v[4:5]
+; GFX6-NEXT:    v_cmp_eq_u32_e64 s[4:5], v3, v11
 ; GFX6-NEXT:    s_xor_b64 vcc, vcc, s[4:5]
-; GFX6-NEXT:    v_fma_f64 v[10:11], -v[8:9], v[12:13], v[16:17]
 ; GFX6-NEXT:    v_div_fixup_f64 v[0:1], v[6:7], v[0:1], 1.0
-; GFX6-NEXT:    v_div_fmas_f64 v[4:5], v[10:11], v[4:5], v[12:13]
+; GFX6-NEXT:    s_nop 0
+; GFX6-NEXT:    v_div_fmas_f64 v[4:5], v[8:9], v[4:5], v[14:15]
 ; GFX6-NEXT:    v_div_fixup_f64 v[2:3], v[4:5], v[2:3], 1.0
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fpow.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fpow.ll
index bc2c5c7371046..4667975ca0d01 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fpow.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fpow.ll
@@ -188,11 +188,11 @@ define <2 x half> @v_pow_v2f16(<2 x half> %x, <2 x half> %y) {
 ; GFX9-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v2, v2, v3
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v0, v0, v1
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v2
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX9-NEXT:    v_exp_f16_e32 v1, v2
-; GFX9-NEXT:    v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX9-NEXT:    v_exp_f16_e32 v1, v1
+; GFX9-NEXT:    v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX9-NEXT:    v_and_or_b32 v0, v1, v2, v0
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -274,11 +274,11 @@ define <2 x half> @v_pow_v2f16_fneg_lhs(<2 x half> %x, <2 x half> %y) {
 ; GFX9-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v2, v2, v3
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v0, v0, v1
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v2, v2
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v2
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v0, v0
-; GFX9-NEXT:    v_exp_f16_e32 v1, v2
-; GFX9-NEXT:    v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX9-NEXT:    v_exp_f16_e32 v1, v1
+; GFX9-NEXT:    v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX9-NEXT:    v_and_or_b32 v0, v1, v2, v0
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -358,8 +358,8 @@ define <2 x half> @v_pow_v2f16_fneg_rhs(<2 x half> %x, <2 x half> %y) {
 ; GFX9-NEXT:    v_xor_b32_e32 v1, 0x80008000, v1
 ; GFX9-NEXT:    v_cvt_f32_f16_e32 v3, v1
 ; GFX9-NEXT:    v_cvt_f32_f16_e32 v2, v2
-; GFX9-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; GFX9-NEXT:    v_cvt_f32_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-NEXT:    v_cvt_f32_f16_e32 v0, v0
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v2, v2, v3
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v2, v2
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v0, v0, v1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
index 09e49cedca486..f0f61e1803bd9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshl.ll
@@ -2094,18 +2094,18 @@ define <2 x i24> @v_fshl_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v6, v6
 ; GFX9-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
 ; GFX9-NEXT:    v_mul_f32_e32 v9, 0x4f7ffffe, v9
-; GFX9-NEXT:    v_bfe_u32 v2, v2, 1, 23
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v9, v9
 ; GFX9-NEXT:    v_mul_lo_u32 v8, v7, v6
+; GFX9-NEXT:    v_bfe_u32 v2, v2, 1, 23
 ; GFX9-NEXT:    v_bfe_u32 v3, v3, 1, 23
+; GFX9-NEXT:    v_mul_lo_u32 v7, v7, v9
 ; GFX9-NEXT:    v_mul_hi_u32 v8, v6, v8
+; GFX9-NEXT:    v_mul_hi_u32 v7, v9, v7
 ; GFX9-NEXT:    v_add_u32_e32 v6, v6, v8
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v8, v9
 ; GFX9-NEXT:    v_mul_hi_u32 v6, v4, v6
-; GFX9-NEXT:    v_mov_b32_e32 v9, 0xffffff
-; GFX9-NEXT:    v_and_b32_e32 v5, v5, v9
-; GFX9-NEXT:    v_mul_lo_u32 v7, v7, v8
+; GFX9-NEXT:    v_mov_b32_e32 v8, 0xffffff
+; GFX9-NEXT:    v_and_b32_e32 v5, v5, v8
 ; GFX9-NEXT:    v_mul_lo_u32 v6, v6, 24
-; GFX9-NEXT:    v_mul_hi_u32 v7, v8, v7
 ; GFX9-NEXT:    v_sub_u32_e32 v4, v4, v6
 ; GFX9-NEXT:    v_subrev_u32_e32 v6, 24, v4
 ; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, 24, v4
@@ -2113,11 +2113,11 @@ define <2 x i24> @v_fshl_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
 ; GFX9-NEXT:    v_subrev_u32_e32 v6, 24, v4
 ; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, 24, v4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
-; GFX9-NEXT:    v_add_u32_e32 v6, v8, v7
+; GFX9-NEXT:    v_add_u32_e32 v6, v9, v7
 ; GFX9-NEXT:    v_mul_hi_u32 v6, v5, v6
 ; GFX9-NEXT:    v_sub_u32_e32 v7, 23, v4
-; GFX9-NEXT:    v_and_b32_e32 v7, v7, v9
-; GFX9-NEXT:    v_and_b32_e32 v4, v4, v9
+; GFX9-NEXT:    v_and_b32_e32 v7, v7, v8
+; GFX9-NEXT:    v_and_b32_e32 v4, v4, v8
 ; GFX9-NEXT:    v_mul_lo_u32 v6, v6, 24
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v2, v7, v2
 ; GFX9-NEXT:    v_lshl_or_b32 v0, v0, v4, v2
@@ -2129,8 +2129,8 @@ define <2 x i24> @v_fshl_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
 ; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, 24, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GFX9-NEXT:    v_sub_u32_e32 v4, 23, v2
-; GFX9-NEXT:    v_and_b32_e32 v4, v4, v9
-; GFX9-NEXT:    v_and_b32_e32 v2, v2, v9
+; GFX9-NEXT:    v_and_b32_e32 v4, v4, v8
+; GFX9-NEXT:    v_and_b32_e32 v2, v2, v8
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v3, v4, v3
 ; GFX9-NEXT:    v_lshl_or_b32 v1, v1, v2, v3
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
index b570be394ca4e..ff6618f8534b2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/fshr.ll
@@ -2100,40 +2100,40 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v6, 24
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v6, v6
-; GFX9-NEXT:    v_mov_b32_e32 v7, 0xffffffe8
 ; GFX9-NEXT:    v_cvt_f32_ubyte0_e32 v9, 24
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v9, v9
+; GFX9-NEXT:    v_mov_b32_e32 v7, 0xffffffe8
 ; GFX9-NEXT:    v_mul_f32_e32 v6, 0x4f7ffffe, v6
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v6, v6
-; GFX9-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
 ; GFX9-NEXT:    v_mul_f32_e32 v9, 0x4f7ffffe, v9
-; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v9, v9
+; GFX9-NEXT:    v_and_b32_e32 v4, 0xffffff, v4
 ; GFX9-NEXT:    v_mul_lo_u32 v8, v7, v6
+; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
+; GFX9-NEXT:    v_mul_lo_u32 v7, v7, v9
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 1, v1
 ; GFX9-NEXT:    v_mul_hi_u32 v8, v6, v8
+; GFX9-NEXT:    v_mul_hi_u32 v7, v9, v7
 ; GFX9-NEXT:    v_add_u32_e32 v6, v6, v8
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v8, v9
 ; GFX9-NEXT:    v_mul_hi_u32 v6, v4, v6
-; GFX9-NEXT:    v_mov_b32_e32 v9, 0xffffff
-; GFX9-NEXT:    v_and_b32_e32 v5, v5, v9
-; GFX9-NEXT:    v_mul_lo_u32 v7, v7, v8
+; GFX9-NEXT:    v_mov_b32_e32 v8, 0xffffff
+; GFX9-NEXT:    v_and_b32_e32 v5, v5, v8
+; GFX9-NEXT:    v_add_u32_e32 v7, v9, v7
 ; GFX9-NEXT:    v_mul_lo_u32 v6, v6, 24
-; GFX9-NEXT:    v_and_b32_e32 v2, v2, v9
-; GFX9-NEXT:    v_and_b32_e32 v3, v3, v9
-; GFX9-NEXT:    v_mul_hi_u32 v7, v8, v7
+; GFX9-NEXT:    v_mul_hi_u32 v7, v5, v7
+; GFX9-NEXT:    v_and_b32_e32 v2, v2, v8
+; GFX9-NEXT:    v_and_b32_e32 v3, v3, v8
 ; GFX9-NEXT:    v_sub_u32_e32 v4, v4, v6
 ; GFX9-NEXT:    v_subrev_u32_e32 v6, 24, v4
 ; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, 24, v4
-; GFX9-NEXT:    v_add_u32_e32 v7, v8, v7
-; GFX9-NEXT:    v_mul_hi_u32 v7, v5, v7
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX9-NEXT:    v_subrev_u32_e32 v6, 24, v4
 ; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, 24, v4
 ; GFX9-NEXT:    v_mul_lo_u32 v7, v7, 24
 ; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
 ; GFX9-NEXT:    v_sub_u32_e32 v6, 23, v4
-; GFX9-NEXT:    v_and_b32_e32 v4, v4, v9
-; GFX9-NEXT:    v_and_b32_e32 v6, v6, v9
+; GFX9-NEXT:    v_and_b32_e32 v4, v4, v8
+; GFX9-NEXT:    v_and_b32_e32 v6, v6, v8
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v2, v4, v2
 ; GFX9-NEXT:    v_lshl_or_b32 v0, v0, v6, v2
 ; GFX9-NEXT:    v_sub_u32_e32 v2, v5, v7
@@ -2144,8 +2144,8 @@ define <2 x i24> @v_fshr_v2i24(<2 x i24> %lhs, <2 x i24> %rhs, <2 x i24> %amt) {
 ; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, 24, v2
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GFX9-NEXT:    v_sub_u32_e32 v4, 23, v2
-; GFX9-NEXT:    v_and_b32_e32 v2, v2, v9
-; GFX9-NEXT:    v_and_b32_e32 v4, v4, v9
+; GFX9-NEXT:    v_and_b32_e32 v2, v2, v8
+; GFX9-NEXT:    v_and_b32_e32 v4, v4, v8
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v2, v2, v3
 ; GFX9-NEXT:    v_lshl_or_b32 v1, v1, v4, v2
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
@@ -4095,8 +4095,8 @@ define <4 x half> @v_fshr_v4i16(<4 x i16> %lhs, <4 x i16> %rhs, <4 x i16> %amt)
 ; GFX10-NEXT:    v_and_b32_e32 v4, s4, v4
 ; GFX10-NEXT:    v_and_b32_e32 v6, s4, v6
 ; GFX10-NEXT:    v_and_b32_e32 v5, s4, v5
-; GFX10-NEXT:    v_and_b32_e32 v7, s4, v7
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v1, 1, v1 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_and_b32_e32 v7, s4, v7
 ; GFX10-NEXT:    v_pk_lshrrev_b16 v2, v4, v2
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v0, v6, v0
 ; GFX10-NEXT:    v_pk_lshrrev_b16 v3, v5, v3

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
index 122b5e65df6ee..1a186b937a995 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/function-returns.ll
@@ -6,15 +6,11 @@
 define i1 @i1_func_void() #0 {
   ; CHECK-LABEL: name: i1_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i1, i1 addrspace(1)* undef
   ret i1 %val
 }
@@ -22,15 +18,11 @@ define i1 @i1_func_void() #0 {
 define zeroext i1 @i1_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i1_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i1, i1 addrspace(1)* undef
   ret i1 %val
 }
@@ -38,15 +30,11 @@ define zeroext i1 @i1_zeroext_func_void() #0 {
 define signext i1 @i1_signext_func_void() #0 {
   ; CHECK-LABEL: name: i1_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1) = G_LOAD [[DEF]](p1) :: (load (s1) from `i1 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i1, i1 addrspace(1)* undef
   ret i1 %val
 }
@@ -54,15 +42,11 @@ define signext i1 @i1_signext_func_void() #0 {
 define i7 @i7_func_void() #0 {
   ; CHECK-LABEL: name: i7_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s7)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i7, i7 addrspace(1)* undef
   ret i7 %val
 }
@@ -70,15 +54,11 @@ define i7 @i7_func_void() #0 {
 define zeroext i7 @i7_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i7_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s7)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i7, i7 addrspace(1)* undef
   ret i7 %val
 }
@@ -86,15 +66,11 @@ define zeroext i7 @i7_zeroext_func_void() #0 {
 define signext i7 @i7_signext_func_void() #0 {
   ; CHECK-LABEL: name: i7_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s7) = G_LOAD [[DEF]](p1) :: (load (s7) from `i7 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s7)
   ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i7, i7 addrspace(1)* undef
   ret i7 %val
 }
@@ -102,15 +78,11 @@ define signext i7 @i7_signext_func_void() #0 {
 define i8 @i8_func_void() #0 {
   ; CHECK-LABEL: name: i8_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i8, i8 addrspace(1)* undef
   ret i8 %val
 }
@@ -118,15 +90,11 @@ define i8 @i8_func_void() #0 {
 define zeroext i8 @i8_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i8_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i8, i8 addrspace(1)* undef
   ret i8 %val
 }
@@ -134,15 +102,11 @@ define zeroext i8 @i8_zeroext_func_void() #0 {
 define signext i8 @i8_signext_func_void() #0 {
   ; CHECK-LABEL: name: i8_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i8, i8 addrspace(1)* undef
   ret i8 %val
 }
@@ -150,15 +114,11 @@ define signext i8 @i8_signext_func_void() #0 {
 define i16 @i16_func_void() #0 {
   ; CHECK-LABEL: name: i16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i16, i16 addrspace(1)* undef
   ret i16 %val
 }
@@ -166,15 +126,11 @@ define i16 @i16_func_void() #0 {
 define zeroext i16 @i16_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i16_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i16, i16 addrspace(1)* undef
   ret i16 %val
 }
@@ -182,15 +138,11 @@ define zeroext i16 @i16_zeroext_func_void() #0 {
 define signext i16 @i16_signext_func_void() #0 {
   ; CHECK-LABEL: name: i16_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `i16 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i16, i16 addrspace(1)* undef
   ret i16 %val
 }
@@ -198,15 +150,11 @@ define signext i16 @i16_signext_func_void() #0 {
 define half @f16_func_void() #0 {
   ; CHECK-LABEL: name: f16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[DEF]](p1) :: (load (s16) from `half addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load half, half addrspace(1)* undef
   ret half %val
 }
@@ -214,15 +162,11 @@ define half @f16_func_void() #0 {
 define i24 @i24_func_void() #0 {
   ; CHECK-LABEL: name: i24_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s24)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i24, i24 addrspace(1)* undef
   ret i24 %val
 }
@@ -230,15 +174,11 @@ define i24 @i24_func_void() #0 {
 define zeroext i24 @i24_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i24_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s24)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i24, i24 addrspace(1)* undef
   ret i24 %val
 }
@@ -246,15 +186,11 @@ define zeroext i24 @i24_zeroext_func_void() #0 {
 define signext i24 @i24_signext_func_void() #0 {
   ; CHECK-LABEL: name: i24_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s24) = G_LOAD [[DEF]](p1) :: (load (s24) from `i24 addrspace(1)* undef`, align 4, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s24)
   ; CHECK-NEXT:   $vgpr0 = COPY [[SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i24, i24 addrspace(1)* undef
   ret i24 %val
 }
@@ -262,9 +198,6 @@ define signext i24 @i24_signext_func_void() #0 {
 define <2 x i24> @v2i24_func_void() #0 {
   ; CHECK-LABEL: name: v2i24_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s24>) = G_LOAD [[DEF]](p1) :: (load (<2 x s24>) from `<2 x i24> addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<2 x s24>)
@@ -272,8 +205,7 @@ define <2 x i24> @v2i24_func_void() #0 {
   ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s24)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load <2 x i24>, <2 x i24> addrspace(1)* undef
   ret <2 x i24> %val
 }
@@ -281,9 +213,6 @@ define <2 x i24> @v2i24_func_void() #0 {
 define <3 x i24> @v3i24_func_void() #0 {
   ; CHECK-LABEL: name: v3i24_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s24>) = G_LOAD [[DEF]](p1) :: (load (<3 x s24>) from `<3 x i24> addrspace(1)* undef`, align 16, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s24), [[UV1:%[0-9]+]]:_(s24), [[UV2:%[0-9]+]]:_(s24) = G_UNMERGE_VALUES [[LOAD]](<3 x s24>)
@@ -293,8 +222,7 @@ define <3 x i24> @v3i24_func_void() #0 {
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[ANYEXT2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
   %val = load <3 x i24>, <3 x i24> addrspace(1)* undef
   ret <3 x i24> %val
 }
@@ -302,14 +230,10 @@ define <3 x i24> @v3i24_func_void() #0 {
 define i32 @i32_func_void() #0 {
   ; CHECK-LABEL: name: i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load i32, i32 addrspace(1)* undef
   ret i32 %val
 }
@@ -317,17 +241,13 @@ define i32 @i32_func_void() #0 {
 define i48 @i48_func_void() #0 {
   ; CHECK-LABEL: name: i48_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[LOAD]](s48)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ANYEXT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load i48, i48 addrspace(1)* undef, align 8
   ret i48 %val
 }
@@ -335,17 +255,13 @@ define i48 @i48_func_void() #0 {
 define signext i48 @i48_signext_func_void() #0 {
   ; CHECK-LABEL: name: i48_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD]](s48)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SEXT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load i48, i48 addrspace(1)* undef, align 8
   ret i48 %val
 }
@@ -353,17 +269,13 @@ define signext i48 @i48_signext_func_void() #0 {
 define zeroext i48 @i48_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i48_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s48) = G_LOAD [[DEF]](p1) :: (load (s48) from `i48 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD]](s48)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ZEXT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load i48, i48 addrspace(1)* undef, align 8
   ret i48 %val
 }
@@ -371,16 +283,12 @@ define zeroext i48 @i48_zeroext_func_void() #0 {
 define i64 @i64_func_void() #0 {
   ; CHECK-LABEL: name: i64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `i64 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load i64, i64 addrspace(1)* undef
   ret i64 %val
 }
@@ -388,9 +296,6 @@ define i64 @i64_func_void() #0 {
 define i65 @i65_func_void() #0 {
   ; CHECK-LABEL: name: i65_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s96) = G_ANYEXT [[LOAD]](s65)
@@ -398,8 +303,7 @@ define i65 @i65_func_void() #0 {
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
   %val = load i65, i65 addrspace(1)* undef
   ret i65 %val
 }
@@ -407,9 +311,6 @@ define i65 @i65_func_void() #0 {
 define signext i65 @i65_signext_func_void() #0 {
   ; CHECK-LABEL: name: i65_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s96) = G_SEXT [[LOAD]](s65)
@@ -417,8 +318,7 @@ define signext i65 @i65_signext_func_void() #0 {
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
   %val = load i65, i65 addrspace(1)* undef
   ret i65 %val
 }
@@ -426,9 +326,6 @@ define signext i65 @i65_signext_func_void() #0 {
 define zeroext i65 @i65_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i65_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s65) = G_LOAD [[DEF]](p1) :: (load (s65) from `i65 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s96) = G_ZEXT [[LOAD]](s65)
@@ -436,8 +333,7 @@ define zeroext i65 @i65_zeroext_func_void() #0 {
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
   %val = load i65, i65 addrspace(1)* undef
   ret i65 %val
 }
@@ -445,14 +341,10 @@ define zeroext i65 @i65_zeroext_func_void() #0 {
 define float @f32_func_void() #0 {
   ; CHECK-LABEL: name: f32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p1) :: (load (s32) from `float addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load float, float addrspace(1)* undef
   ret float %val
 }
@@ -460,16 +352,12 @@ define float @f32_func_void() #0 {
 define double @f64_func_void() #0 {
   ; CHECK-LABEL: name: f64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[DEF]](p1) :: (load (s64) from `double addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load double, double addrspace(1)* undef
   ret double %val
 }
@@ -477,9 +365,6 @@ define double @f64_func_void() #0 {
 define <2 x double> @v2f64_func_void() #0 {
   ; CHECK-LABEL: name: v2f64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `<2 x double> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
@@ -487,8 +372,7 @@ define <2 x double> @v2f64_func_void() #0 {
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %val = load <2 x double>, <2 x double> addrspace(1)* undef
   ret <2 x double> %val
 }
@@ -496,16 +380,12 @@ define <2 x double> @v2f64_func_void() #0 {
 define <2 x i32> @v2i32_func_void() #0 {
   ; CHECK-LABEL: name: v2i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[DEF]](p1) :: (load (<2 x s32>) from `<2 x i32> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load <2 x i32>, <2 x i32> addrspace(1)* undef
   ret <2 x i32> %val
 }
@@ -513,17 +393,13 @@ define <2 x i32> @v2i32_func_void() #0 {
 define <3 x i32> @v3i32_func_void() #0 {
   ; CHECK-LABEL: name: v3i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[DEF]](p1) :: (load (<3 x s32>) from `<3 x i32> addrspace(1)* undef`, align 16, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<3 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
   %val = load <3 x i32>, <3 x i32> addrspace(1)* undef
   ret <3 x i32> %val
 }
@@ -531,9 +407,6 @@ define <3 x i32> @v3i32_func_void() #0 {
 define <4 x i32> @v4i32_func_void() #0 {
   ; CHECK-LABEL: name: v4i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[DEF]](p1) :: (load (<4 x s32>) from `<4 x i32> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<4 x s32>)
@@ -541,8 +414,7 @@ define <4 x i32> @v4i32_func_void() #0 {
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %val = load <4 x i32>, <4 x i32> addrspace(1)* undef
   ret <4 x i32> %val
 }
@@ -550,9 +422,6 @@ define <4 x i32> @v4i32_func_void() #0 {
 define <5 x i32> @v5i32_func_void() #0 {
   ; CHECK-LABEL: name: v5i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<5 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<5 x s32>) from `<5 x i32> addrspace(1)* undef`, align 32, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<5 x s32>)
@@ -561,8 +430,7 @@ define <5 x i32> @v5i32_func_void() #0 {
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
   ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4
   %val = load volatile <5 x i32>, <5 x i32> addrspace(1)* undef
   ret <5 x i32> %val
 }
@@ -570,9 +438,6 @@ define <5 x i32> @v5i32_func_void() #0 {
 define <8 x i32> @v8i32_func_void() #0 {
   ; CHECK-LABEL: name: v8i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s32>) from %ir.ptr, addrspace 1)
@@ -585,8 +450,7 @@ define <8 x i32> @v8i32_func_void() #0 {
   ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
   ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
   ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
   %ptr = load volatile <8 x i32> addrspace(1)*, <8 x i32> addrspace(1)* addrspace(4)* undef
   %val = load <8 x i32>, <8 x i32> addrspace(1)* %ptr
   ret <8 x i32> %val
@@ -595,9 +459,6 @@ define <8 x i32> @v8i32_func_void() #0 {
 define <16 x i32> @v16i32_func_void() #0 {
   ; CHECK-LABEL: name: v16i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s32>) from %ir.ptr, addrspace 1)
@@ -618,8 +479,7 @@ define <16 x i32> @v16i32_func_void() #0 {
   ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
   ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
   ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
   %ptr = load volatile <16 x i32> addrspace(1)*, <16 x i32> addrspace(1)* addrspace(4)* undef
   %val = load <16 x i32>, <16 x i32> addrspace(1)* %ptr
   ret <16 x i32> %val
@@ -628,9 +488,6 @@ define <16 x i32> @v16i32_func_void() #0 {
 define <32 x i32> @v32i32_func_void() #0 {
   ; CHECK-LABEL: name: v32i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<32 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
@@ -667,8 +524,7 @@ define <32 x i32> @v32i32_func_void() #0 {
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
   ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
   %ptr = load volatile <32 x i32> addrspace(1)*, <32 x i32> addrspace(1)* addrspace(4)* undef
   %val = load <32 x i32>, <32 x i32> addrspace(1)* %ptr
   ret <32 x i32> %val
@@ -677,9 +533,6 @@ define <32 x i32> @v32i32_func_void() #0 {
 define <2 x i64> @v2i64_func_void() #0 {
   ; CHECK-LABEL: name: v2i64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s64>) = G_LOAD [[DEF]](p1) :: (load (<2 x s64>) from `<2 x i64> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](<2 x s64>)
@@ -687,8 +540,7 @@ define <2 x i64> @v2i64_func_void() #0 {
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %val = load <2 x i64>, <2 x i64> addrspace(1)* undef
   ret <2 x i64> %val
 }
@@ -696,9 +548,6 @@ define <2 x i64> @v2i64_func_void() #0 {
 define <3 x i64> @v3i64_func_void() #0 {
   ; CHECK-LABEL: name: v3i64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<3 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<3 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<3 x s64>) from %ir.ptr, align 32, addrspace 1)
@@ -709,8 +558,7 @@ define <3 x i64> @v3i64_func_void() #0 {
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
   ; CHECK-NEXT:   $vgpr4 = COPY [[UV4]](s32)
   ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5
   %ptr = load volatile <3 x i64> addrspace(1)*, <3 x i64> addrspace(1)* addrspace(4)* undef
   %val = load <3 x i64>, <3 x i64> addrspace(1)* %ptr
   ret <3 x i64> %val
@@ -719,9 +567,6 @@ define <3 x i64> @v3i64_func_void() #0 {
 define <4 x i64> @v4i64_func_void() #0 {
   ; CHECK-LABEL: name: v4i64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<4 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<4 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s64>) from %ir.ptr, addrspace 1)
@@ -734,8 +579,7 @@ define <4 x i64> @v4i64_func_void() #0 {
   ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](s32)
   ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](s32)
   ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
   %ptr = load volatile <4 x i64> addrspace(1)*, <4 x i64> addrspace(1)* addrspace(4)* undef
   %val = load <4 x i64>, <4 x i64> addrspace(1)* %ptr
   ret <4 x i64> %val
@@ -744,9 +588,6 @@ define <4 x i64> @v4i64_func_void() #0 {
 define <5 x i64> @v5i64_func_void() #0 {
   ; CHECK-LABEL: name: v5i64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<5 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<5 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s64>) from %ir.ptr, align 64, addrspace 1)
@@ -761,8 +602,7 @@ define <5 x i64> @v5i64_func_void() #0 {
   ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](s32)
   ; CHECK-NEXT:   $vgpr8 = COPY [[UV8]](s32)
   ; CHECK-NEXT:   $vgpr9 = COPY [[UV9]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9
   %ptr = load volatile <5 x i64> addrspace(1)*, <5 x i64> addrspace(1)* addrspace(4)* undef
   %val = load <5 x i64>, <5 x i64> addrspace(1)* %ptr
   ret <5 x i64> %val
@@ -771,9 +611,6 @@ define <5 x i64> @v5i64_func_void() #0 {
 define <8 x i64> @v8i64_func_void() #0 {
   ; CHECK-LABEL: name: v8i64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<8 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s64>) from %ir.ptr, addrspace 1)
@@ -794,8 +631,7 @@ define <8 x i64> @v8i64_func_void() #0 {
   ; CHECK-NEXT:   $vgpr13 = COPY [[UV13]](s32)
   ; CHECK-NEXT:   $vgpr14 = COPY [[UV14]](s32)
   ; CHECK-NEXT:   $vgpr15 = COPY [[UV15]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
   %ptr = load volatile <8 x i64> addrspace(1)*, <8 x i64> addrspace(1)* addrspace(4)* undef
   %val = load <8 x i64>, <8 x i64> addrspace(1)* %ptr
   ret <8 x i64> %val
@@ -804,9 +640,6 @@ define <8 x i64> @v8i64_func_void() #0 {
 define <16 x i64> @v16i64_func_void() #0 {
   ; CHECK-LABEL: name: v16i64_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i64> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s64>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s64>) from %ir.ptr, addrspace 1)
@@ -843,8 +676,7 @@ define <16 x i64> @v16i64_func_void() #0 {
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
   ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
   %ptr = load volatile <16 x i64> addrspace(1)*, <16 x i64> addrspace(1)* addrspace(4)* undef
   %val = load <16 x i64>, <16 x i64> addrspace(1)* %ptr
   ret <16 x i64> %val
@@ -853,14 +685,10 @@ define <16 x i64> @v16i64_func_void() #0 {
 define <2 x i16> @v2i16_func_void() #0 {
   ; CHECK-LABEL: name: v2i16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `<2 x i16> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load <2 x i16>, <2 x i16> addrspace(1)* undef
   ret <2 x i16> %val
 }
@@ -868,14 +696,10 @@ define <2 x i16> @v2i16_func_void() #0 {
 define <2 x half> @v2f16_func_void() #0 {
   ; CHECK-LABEL: name: v2f16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[DEF]](p1) :: (load (<2 x s16>) from `<2 x half> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[LOAD]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = load <2 x half>, <2 x half> addrspace(1)* undef
   ret <2 x half> %val
 }
@@ -883,9 +707,6 @@ define <2 x half> @v2f16_func_void() #0 {
 define <3 x i16> @v3i16_func_void() #0 {
   ; CHECK-LABEL: name: v3i16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[DEF]](p1) :: (load (<3 x s16>) from `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[LOAD]](<3 x s16>)
@@ -894,8 +715,7 @@ define <3 x i16> @v3i16_func_void() #0 {
   ; CHECK-NEXT:   [[UV3:%[0-9]+]]:_(<2 x s16>), [[UV4:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s16>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV3]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV4]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load <3 x i16>, <3 x i16> addrspace(1)* undef
   ret <3 x i16> %val
 }
@@ -903,16 +723,12 @@ define <3 x i16> @v3i16_func_void() #0 {
 define <4 x i16> @v4i16_func_void() #0 {
   ; CHECK-LABEL: name: v4i16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `<4 x i16> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load <4 x i16>, <4 x i16> addrspace(1)* undef
   ret <4 x i16> %val
 }
@@ -920,16 +736,12 @@ define <4 x i16> @v4i16_func_void() #0 {
 define <4 x half> @v4f16_func_void() #0 {
   ; CHECK-LABEL: name: v4f16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[DEF]](p1) :: (load (<4 x s16>) from `<4 x half> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[LOAD]](<4 x s16>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load <4 x half>, <4 x half> addrspace(1)* undef
   ret <4 x half> %val
 }
@@ -937,9 +749,6 @@ define <4 x half> @v4f16_func_void() #0 {
 define <5 x i16> @v5i16_func_void() #0 {
   ; CHECK-LABEL: name: v5i16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<5 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<5 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<5 x s16>) from %ir.ptr, align 16, addrspace 1)
@@ -950,8 +759,7 @@ define <5 x i16> @v5i16_func_void() #0 {
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV5]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV6]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV7]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
   %ptr = load volatile <5 x i16> addrspace(1)*, <5 x i16> addrspace(1)* addrspace(4)* undef
   %val = load <5 x i16>, <5 x i16> addrspace(1)* %ptr
   ret <5 x i16> %val
@@ -960,9 +768,6 @@ define <5 x i16> @v5i16_func_void() #0 {
 define <8 x i16> @v8i16_func_void() #0 {
   ; CHECK-LABEL: name: v8i16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<8 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<8 x s16>) from %ir.ptr, addrspace 1)
@@ -971,8 +776,7 @@ define <8 x i16> @v8i16_func_void() #0 {
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %ptr = load volatile <8 x i16> addrspace(1)*, <8 x i16> addrspace(1)* addrspace(4)* undef
   %val = load <8 x i16>, <8 x i16> addrspace(1)* %ptr
   ret <8 x i16> %val
@@ -981,9 +785,6 @@ define <8 x i16> @v8i16_func_void() #0 {
 define <16 x i16> @v16i16_func_void() #0 {
   ; CHECK-LABEL: name: v16i16_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i16> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s16>) from %ir.ptr, addrspace 1)
@@ -996,8 +797,7 @@ define <16 x i16> @v16i16_func_void() #0 {
   ; CHECK-NEXT:   $vgpr5 = COPY [[UV5]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr6 = COPY [[UV6]](<2 x s16>)
   ; CHECK-NEXT:   $vgpr7 = COPY [[UV7]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7
   %ptr = load volatile <16 x i16> addrspace(1)*, <16 x i16> addrspace(1)* addrspace(4)* undef
   %val = load <16 x i16>, <16 x i16> addrspace(1)* %ptr
   ret <16 x i16> %val
@@ -1006,9 +806,6 @@ define <16 x i16> @v16i16_func_void() #0 {
 define <16 x i8> @v16i8_func_void() #0 {
   ; CHECK-LABEL: name: v16i8_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<16 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<16 x s8>) from %ir.ptr, addrspace 1)
@@ -1061,8 +858,7 @@ define <16 x i8> @v16i8_func_void() #0 {
   ; CHECK-NEXT:   $vgpr14 = COPY [[ANYEXT30]](s32)
   ; CHECK-NEXT:   [[ANYEXT31:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT15]](s16)
   ; CHECK-NEXT:   $vgpr15 = COPY [[ANYEXT31]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15
   %ptr = load volatile <16 x i8> addrspace(1)*, <16 x i8> addrspace(1)* addrspace(4)* undef
   %val = load <16 x i8>, <16 x i8> addrspace(1)* %ptr
   ret <16 x i8> %val
@@ -1071,9 +867,6 @@ define <16 x i8> @v16i8_func_void() #0 {
 define <2 x i8> @v2i8_func_void() #0 {
   ; CHECK-LABEL: name: v2i8_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[DEF]](p1) :: (load (<2 x s8>) from `<2 x i8> addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<2 x s8>)
@@ -1083,8 +876,7 @@ define <2 x i8> @v2i8_func_void() #0 {
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT2]](s32)
   ; CHECK-NEXT:   [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT1]](s16)
   ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT3]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load <2 x i8>, <2 x i8> addrspace(1)* undef
   ret <2 x i8> %val
 }
@@ -1092,9 +884,6 @@ define <2 x i8> @v2i8_func_void() #0 {
 define <3 x i8> @v3i8_func_void() #0 {
   ; CHECK-LABEL: name: v3i8_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[DEF]](p1) :: (load (<3 x s8>) from `<3 x i8> addrspace(1)* undef`, align 4, addrspace 1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[LOAD]](<3 x s8>)
@@ -1107,8 +896,7 @@ define <3 x i8> @v3i8_func_void() #0 {
   ; CHECK-NEXT:   $vgpr1 = COPY [[ANYEXT4]](s32)
   ; CHECK-NEXT:   [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT2]](s16)
   ; CHECK-NEXT:   $vgpr2 = COPY [[ANYEXT5]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
   %val = load <3 x i8>, <3 x i8> addrspace(1)* undef
   ret <3 x i8> %val
 }
@@ -1116,9 +904,6 @@ define <3 x i8> @v3i8_func_void() #0 {
 define <4  x i8> @v4i8_func_void() #0 {
   ; CHECK-LABEL: name: v4i8_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<4 x i8> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[LOAD]](p1) :: (load (<4 x s8>) from %ir.ptr, addrspace 1)
@@ -1135,8 +920,7 @@ define <4  x i8> @v4i8_func_void() #0 {
   ; CHECK-NEXT:   $vgpr2 = COPY [[ANYEXT6]](s32)
   ; CHECK-NEXT:   [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT3]](s16)
   ; CHECK-NEXT:   $vgpr3 = COPY [[ANYEXT7]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %ptr = load volatile <4  x i8> addrspace(1)*, <4  x i8> addrspace(1)* addrspace(4)* undef
   %val = load <4  x i8>, <4  x i8> addrspace(1)* %ptr
   ret <4  x i8> %val
@@ -1145,9 +929,6 @@ define <4  x i8> @v4i8_func_void() #0 {
 define {i8, i32} @struct_i8_i32_func_void() #0 {
   ; CHECK-LABEL: name: struct_i8_i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (load (s8) from `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
@@ -1156,8 +937,7 @@ define {i8, i32} @struct_i8_i32_func_void() #0 {
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[LOAD1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = load { i8, i32 }, { i8, i32 } addrspace(1)* undef
   ret { i8, i32 } %val
 }
@@ -1165,20 +945,18 @@ define {i8, i32} @struct_i8_i32_func_void() #0 {
 define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i32 }) %arg0) #0 {
   ; CHECK-LABEL: name: void_func_sret_struct_i8_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[DEF]](p1) :: (volatile load (s8) from `i8 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY2]](p1) :: (volatile load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p1) :: (volatile load (s32) from `i32 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C]](s32)
   ; CHECK-NEXT:   G_STORE [[LOAD]](s8), [[COPY]](p5) :: (store (s8) into %ir.gep01, addrspace 5)
   ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[PTR_ADD]](p5) :: (store (s32) into %ir.gep1, addrspace 5)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   %val0 = load volatile i8, i8 addrspace(1)* undef
   %val1 = load volatile i32, i32 addrspace(1)* undef
   %gep0 = getelementptr inbounds { i8, i32 }, { i8, i32 } addrspace(5)* %arg0, i32 0, i32 0
@@ -1195,16 +973,14 @@ define void @void_func_sret_struct_i8_i32({ i8, i32 } addrspace(5)* sret({ i8, i
 define <33 x i32> @v33i32_func_void() #0 {
   ; CHECK-LABEL: name: v33i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `<33 x i32> addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<33 x s32>) from %ir.ptr, align 256, addrspace 1)
   ; CHECK-NEXT:   G_STORE [[LOAD1]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr = load volatile <33 x i32> addrspace(1)*, <33 x i32> addrspace(1)* addrspace(4)* undef
   %val = load <33 x i32>, <33 x i32> addrspace(1)* %ptr
   ret <33 x i32> %val
@@ -1213,23 +989,21 @@ define <33 x i32> @v33i32_func_void() #0 {
 define <33 x i32> @v33i32_func_v33i32_i32(<33 x i32> addrspace(1)* %p, i32 %idx) #0 {
   ; CHECK-LABEL: name: v33i32_func_v33i32_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY3]](s32)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(s64) = G_MUL [[SEXT]], [[C]]
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[MV]], [[MUL]](s64)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p1) = COPY [[PTR_ADD]](p1)
-  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[COPY5]](p1) :: (load (<33 x s32>) from %ir.gep, align 256, addrspace 1)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(p1) = COPY [[PTR_ADD]](p1)
+  ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<33 x s32>) = G_LOAD [[COPY4]](p1) :: (load (<33 x s32>) from %ir.gep, align 256, addrspace 1)
   ; CHECK-NEXT:   G_STORE [[LOAD]](<33 x s32>), [[COPY]](p5) :: (store (<33 x s32>), align 256, addrspace 5)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY6]]
+  ; CHECK-NEXT:   SI_RETURN
   %gep = getelementptr inbounds <33 x i32>, <33 x i32> addrspace(1)* %p, i32 %idx
   %val = load <33 x i32>, <33 x i32> addrspace(1)* %gep
   ret <33 x i32> %val
@@ -1238,10 +1012,9 @@ define <33 x i32> @v33i32_func_v33i32_i32(<33 x i32> addrspace(1)* %p, i32 %idx)
 define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
   ; CHECK-LABEL: name: struct_v32i32_i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `{ <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[LOAD]](p1) :: (load (<32 x s32>) from %ir.ptr, addrspace 1)
@@ -1252,8 +1025,7 @@ define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
   ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
   ; CHECK-NEXT:   G_STORE [[LOAD2]](s32), [[PTR_ADD1]](p5) :: (store (s32), align 128, addrspace 5)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr = load volatile { <32 x i32>, i32 } addrspace(1)*, { <32 x i32>, i32 } addrspace(1)* addrspace(4)* undef
   %val = load { <32 x i32>, i32 }, { <32 x i32>, i32 } addrspace(1)* %ptr
   ret { <32 x i32>, i32 }%val
@@ -1262,10 +1034,9 @@ define { <32 x i32>, i32 } @struct_v32i32_i32_func_void() #0 {
 define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
   ; CHECK-LABEL: name: struct_i32_v32i32_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (volatile load (p1) from `{ i32, <32 x i32> } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p1) :: (load (s32) from %ir.ptr, align 128, addrspace 1)
@@ -1276,8 +1047,7 @@ define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
   ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
   ; CHECK-NEXT:   G_STORE [[LOAD2]](<32 x s32>), [[PTR_ADD1]](p5) :: (store (<32 x s32>), addrspace 5)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr = load volatile { i32, <32 x i32> } addrspace(1)*, { i32, <32 x i32> } addrspace(1)* addrspace(4)* undef
   %val = load { i32, <32 x i32> }, { i32, <32 x i32> } addrspace(1)* %ptr
   ret { i32, <32 x i32> }%val
@@ -1287,9 +1057,6 @@ define { i32, <32 x i32> } @struct_i32_v32i32_func_void() #0 {
 define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
   ; CHECK-LABEL: name: v3i32_struct_func_void_wasted_reg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -1308,8 +1075,7 @@ define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[LOAD3]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %load0 = load volatile i32, i32 addrspace(3)* undef
   %load1 = load volatile i32, i32 addrspace(3)* undef
   %load2 = load volatile i32, i32 addrspace(3)* undef
@@ -1326,11 +1092,8 @@ define { <3 x i32>, i32 } @v3i32_struct_func_void_wasted_reg() #0 {
 define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
   ; CHECK-LABEL: name: v3f32_struct_func_void_wasted_reg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p3) = COPY [[DEF]](p3)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY [[DEF]](p3)
   ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(<3 x s32>) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -1339,7 +1102,7 @@ define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p3) :: (volatile load (s32) from `float addrspace(3)* undef`, addrspace 3)
-  ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
+  ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (volatile load (s32) from `i32 addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   [[IVEC:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[DEF1]], [[LOAD]](s32), [[C]](s32)
   ; CHECK-NEXT:   [[IVEC1:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC]], [[LOAD1]](s32), [[C1]](s32)
   ; CHECK-NEXT:   [[IVEC2:%[0-9]+]]:_(<3 x s32>) = G_INSERT_VECTOR_ELT [[IVEC1]], [[LOAD2]](s32), [[C2]](s32)
@@ -1348,8 +1111,7 @@ define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[LOAD3]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %load0 = load volatile float, float addrspace(3)* undef
   %load1 = load volatile float, float addrspace(3)* undef
   %load2 = load volatile float, float addrspace(3)* undef
@@ -1366,10 +1128,9 @@ define { <3 x float>, i32 } @v3f32_struct_func_void_wasted_reg() #0 {
 define void @void_func_sret_max_known_zero_bits(i8 addrspace(5)* sret(i8) %arg0) #0 {
   ; CHECK-LABEL: name: void_func_sret_max_known_zero_bits
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 17
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 18
@@ -1381,8 +1142,7 @@ define void @void_func_sret_max_known_zero_bits(i8 addrspace(5)* sret(i8) %arg0)
   ; CHECK-NEXT:   G_STORE [[LSHR]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   G_STORE [[LSHR1]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   G_STORE [[LSHR2]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.int = ptrtoint i8 addrspace(5)* %arg0 to i32
 
   %lshr0 = lshr i32 %arg0.int, 16
@@ -1398,9 +1158,6 @@ define void @void_func_sret_max_known_zero_bits(i8 addrspace(5)* sret(i8) %arg0)
 define i1022 @i1022_func_void() #0 {
   ; CHECK-LABEL: name: i1022_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s1024) = G_ANYEXT [[LOAD]](s1022)
@@ -1437,8 +1194,7 @@ define i1022 @i1022_func_void() #0 {
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
   ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
   %val = load i1022, i1022 addrspace(1)* undef
   ret i1022 %val
 }
@@ -1446,9 +1202,6 @@ define i1022 @i1022_func_void() #0 {
 define signext i1022 @i1022_signext_func_void() #0 {
   ; CHECK-LABEL: name: i1022_signext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s1024) = G_SEXT [[LOAD]](s1022)
@@ -1485,8 +1238,7 @@ define signext i1022 @i1022_signext_func_void() #0 {
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
   ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
   %val = load i1022, i1022 addrspace(1)* undef
   ret i1022 %val
 }
@@ -1494,9 +1246,6 @@ define signext i1022 @i1022_signext_func_void() #0 {
 define zeroext i1022 @i1022_zeroext_func_void() #0 {
   ; CHECK-LABEL: name: i1022_zeroext_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s1022) = G_LOAD [[DEF]](p1) :: (load (s1022) from `i1022 addrspace(1)* undef`, align 8, addrspace 1)
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s1024) = G_ZEXT [[LOAD]](s1022)
@@ -1533,8 +1282,7 @@ define zeroext i1022 @i1022_zeroext_func_void() #0 {
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
   ; CHECK-NEXT:   $vgpr31 = COPY [[UV31]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $vgpr31
   %val = load i1022, i1022 addrspace(1)* undef
   ret i1022 %val
 }
@@ -1544,10 +1292,9 @@ define zeroext i1022 @i1022_zeroext_func_void() #0 {
 define %struct.with.ptrs @ptr_in_struct_func_void() #0 {
   ; CHECK-LABEL: name: ptr_in_struct_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(<32 x s32>) = G_LOAD [[DEF]](p1) :: (volatile load (<32 x s32>) from `%struct.with.ptrs addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 128
@@ -1569,8 +1316,7 @@ define %struct.with.ptrs @ptr_in_struct_func_void() #0 {
   ; CHECK-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 144
   ; CHECK-NEXT:   [[PTR_ADD5:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C5]](s32)
   ; CHECK-NEXT:   G_STORE [[LOAD3]](<2 x p1>), [[PTR_ADD5]](p5) :: (store (<2 x p1>), addrspace 5)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %val = load volatile %struct.with.ptrs, %struct.with.ptrs addrspace(1)* undef
   ret %struct.with.ptrs %val
 }
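
Note: every function-returns.ll hunk above has the same shape: the entry
block stops listing $sgpr30_sgpr31 as a live-in and copying it into a
virtual sgpr_64, and the two-instruction return sequence collapses into the
operand-less pseudo. Condensed to an illustrative before/after sketch
(virtual-register names here are placeholders, not literal hunk text):

  ; Before:  liveins: $sgpr30_sgpr31
  ;          %ra:sgpr_64 = COPY $sgpr30_sgpr31
  ;          ...
  ;          %ret:ccr_sgpr_64 = COPY %ra
  ;          S_SETPC_B64_return %ret, implicit $vgpr0
  ; After:   SI_RETURN implicit $vgpr0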

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-value.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-value.ll
index 420b9ef437b19..17a1a86cc649f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/global-value.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/global-value.ll
@@ -16,113 +16,89 @@
 define i32 addrspace(4)* @external_constant_got() {
   ; GCN-LABEL: name: external_constant_got
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p4) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @external_constant + 4, target-flags(amdgpu-gotprel32-hi) @external_constant + 12, implicit-def $scc
   ; GCN:   [[LOAD:%[0-9]+]]:_(p4) = G_LOAD [[SI_PC_ADD_REL_OFFSET]](p4) :: (dereferenceable invariant load (p4) from got, addrspace 4)
   ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](p4)
   ; GCN:   $vgpr0 = COPY [[UV]](s32)
   ; GCN:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; GCN:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   ret i32 addrspace(4)* @external_constant
 }
 
 define i32 addrspace(1)* @external_global_got() {
   ; GCN-LABEL: name: external_global_got
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p4) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @external_global + 4, target-flags(amdgpu-gotprel32-hi) @external_global + 12, implicit-def $scc
   ; GCN:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[SI_PC_ADD_REL_OFFSET]](p4) :: (dereferenceable invariant load (p1) from got, addrspace 4)
   ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](p1)
   ; GCN:   $vgpr0 = COPY [[UV]](s32)
   ; GCN:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; GCN:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   ret i32 addrspace(1)* @external_global
 }
 
 define i32 addrspace(999)* @external_other_got() {
   ; GCN-LABEL: name: external_other_got
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p4) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @external_other + 4, target-flags(amdgpu-gotprel32-hi) @external_other + 12, implicit-def $scc
   ; GCN:   [[LOAD:%[0-9]+]]:_(p999) = G_LOAD [[SI_PC_ADD_REL_OFFSET]](p4) :: (dereferenceable invariant load (p999) from got, addrspace 4)
   ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[LOAD]](p999)
   ; GCN:   $vgpr0 = COPY [[UV]](s32)
   ; GCN:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; GCN:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   ret i32 addrspace(999)* @external_other
 }
 
 define i32 addrspace(4)* @internal_constant_pcrel() {
   ; GCN-LABEL: name: internal_constant_pcrel
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p4) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @internal_constant + 4, target-flags(amdgpu-rel32-hi) @internal_constant + 12, implicit-def $scc
   ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SI_PC_ADD_REL_OFFSET]](p4)
   ; GCN:   $vgpr0 = COPY [[UV]](s32)
   ; GCN:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; GCN:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   ret i32 addrspace(4)* @internal_constant
 }
 
 define i32 addrspace(1)* @internal_global_pcrel() {
   ; GCN-LABEL: name: internal_global_pcrel
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p1) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @internal_global + 4, target-flags(amdgpu-rel32-hi) @internal_global + 12, implicit-def $scc
   ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SI_PC_ADD_REL_OFFSET]](p1)
   ; GCN:   $vgpr0 = COPY [[UV]](s32)
   ; GCN:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; GCN:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   ret i32 addrspace(1)* @internal_global
 }
 
 define i32 addrspace(999)* @internal_other_pcrel() {
   ; GCN-LABEL: name: internal_other_pcrel
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p999) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @internal_other + 4, target-flags(amdgpu-rel32-hi) @internal_other + 12, implicit-def $scc
   ; GCN:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SI_PC_ADD_REL_OFFSET]](p999)
   ; GCN:   $vgpr0 = COPY [[UV]](s32)
   ; GCN:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; GCN:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   ret i32 addrspace(999)* @internal_other
 }
 
 define i32 addrspace(6)* @external_constant32_got() {
   ; GCN-LABEL: name: external_constant32_got
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p4) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @external_constant32 + 4, target-flags(amdgpu-gotprel32-hi) @external_constant32 + 12, implicit-def $scc
   ; GCN:   [[LOAD:%[0-9]+]]:_(p4) = G_LOAD [[SI_PC_ADD_REL_OFFSET]](p4) :: (dereferenceable invariant load (p4) from got, addrspace 4)
   ; GCN:   [[EXTRACT:%[0-9]+]]:_(p6) = G_EXTRACT [[LOAD]](p4), 0
   ; GCN:   $vgpr0 = COPY [[EXTRACT]](p6)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; GCN:   SI_RETURN implicit $vgpr0
   ret i32 addrspace(6)* @external_constant32
 }
 
 define i32 addrspace(6)* @internal_constant32_pcrel() {
   ; GCN-LABEL: name: internal_constant32_pcrel
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN:   liveins: $sgpr30_sgpr31
-  ; GCN:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN:   [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64(p4) = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @internal_constant32 + 4, target-flags(amdgpu-rel32-hi) @internal_constant32 + 12, implicit-def $scc
   ; GCN:   [[EXTRACT:%[0-9]+]]:_(p6) = G_EXTRACT [[SI_PC_ADD_REL_OFFSET]](p4), 0
   ; GCN:   $vgpr0 = COPY [[EXTRACT]](p6)
-  ; GCN:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; GCN:   SI_RETURN implicit $vgpr0
   ret i32 addrspace(6)* @internal_constant32
 }
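
Note: the image-waterfall-loop-O0.ll hunks below show a knock-on effect on
SGPR-to-VGPR spill lanes: with the s30/s31 writelanes to lanes 0 and 1 gone,
the remaining lane indices all shift down by two. An illustrative
condensation of the first such hunk (not literal patch text):

  ; Before:  v_writelane_b32 v15, s30, 0
  ;          v_writelane_b32 v15, s31, 1
  ;          v_writelane_b32 v15, s4, 2
  ; After:   v_writelane_b32 v15, s4, 0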

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll
index 96d107c371ea3..09a25d183c730 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/image-waterfall-loop-O0.ll
@@ -33,17 +33,15 @@ define <4 x float> @waterfall_loop(<8 x i32> %vgpr_srd) {
 ; CHECK-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
 ; CHECK-NEXT:    buffer_store_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
 ; CHECK-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
-; CHECK-NEXT:    v_writelane_b32 v15, s30, 0
-; CHECK-NEXT:    v_writelane_b32 v15, s31, 1
 ; CHECK-NEXT:    s_mov_b32 s8, 0
 ; CHECK-NEXT:    s_mov_b32 s4, s8
 ; CHECK-NEXT:    s_mov_b32 s5, s8
 ; CHECK-NEXT:    s_mov_b32 s6, s8
 ; CHECK-NEXT:    s_mov_b32 s7, s8
-; CHECK-NEXT:    v_writelane_b32 v15, s4, 2
-; CHECK-NEXT:    v_writelane_b32 v15, s5, 3
-; CHECK-NEXT:    v_writelane_b32 v15, s6, 4
-; CHECK-NEXT:    v_writelane_b32 v15, s7, 5
+; CHECK-NEXT:    v_writelane_b32 v15, s4, 0
+; CHECK-NEXT:    v_writelane_b32 v15, s5, 1
+; CHECK-NEXT:    v_writelane_b32 v15, s6, 2
+; CHECK-NEXT:    v_writelane_b32 v15, s7, 3
 ; CHECK-NEXT:    s_mov_b32 s6, 0
 ; CHECK-NEXT:    s_mov_b32 s4, s6
 ; CHECK-NEXT:    s_mov_b32 s5, s6
@@ -68,14 +66,14 @@ define <4 x float> @waterfall_loop(<8 x i32> %vgpr_srd) {
 ; CHECK-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
 ; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; CHECK-NEXT:    s_mov_b32 s4, exec_lo
-; CHECK-NEXT:    v_writelane_b32 v15, s4, 6
+; CHECK-NEXT:    v_writelane_b32 v15, s4, 4
 ; CHECK-NEXT:  .LBB0_1: ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:32 ; 4-byte Folded Reload
 ; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
-; CHECK-NEXT:    v_readlane_b32 s16, v15, 2
-; CHECK-NEXT:    v_readlane_b32 s17, v15, 3
-; CHECK-NEXT:    v_readlane_b32 s18, v15, 4
-; CHECK-NEXT:    v_readlane_b32 s19, v15, 5
+; CHECK-NEXT:    v_readlane_b32 s16, v15, 0
+; CHECK-NEXT:    v_readlane_b32 s17, v15, 1
+; CHECK-NEXT:    v_readlane_b32 s18, v15, 2
+; CHECK-NEXT:    v_readlane_b32 s19, v15, 3
 ; CHECK-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
 ; CHECK-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
 ; CHECK-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
@@ -89,31 +87,31 @@ define <4 x float> @waterfall_loop(<8 x i32> %vgpr_srd) {
 ; CHECK-NEXT:    v_readfirstlane_b32 s23, v9
 ; CHECK-NEXT:    s_mov_b32 s4, s8
 ; CHECK-NEXT:    s_mov_b32 s5, s23
-; CHECK-NEXT:    v_writelane_b32 v15, s4, 7
-; CHECK-NEXT:    v_writelane_b32 v15, s5, 8
+; CHECK-NEXT:    v_writelane_b32 v15, s4, 5
+; CHECK-NEXT:    v_writelane_b32 v15, s5, 6
 ; CHECK-NEXT:    v_cmp_eq_u64_e64 s5, s[4:5], v[8:9]
 ; CHECK-NEXT:    v_readfirstlane_b32 s22, v6
 ; CHECK-NEXT:    v_readfirstlane_b32 s21, v7
 ; CHECK-NEXT:    s_mov_b32 s6, s22
 ; CHECK-NEXT:    s_mov_b32 s7, s21
-; CHECK-NEXT:    v_writelane_b32 v15, s6, 9
-; CHECK-NEXT:    v_writelane_b32 v15, s7, 10
+; CHECK-NEXT:    v_writelane_b32 v15, s6, 7
+; CHECK-NEXT:    v_writelane_b32 v15, s7, 8
 ; CHECK-NEXT:    v_cmp_eq_u64_e64 s4, s[6:7], v[6:7]
 ; CHECK-NEXT:    s_and_b32 s5, s4, s5
 ; CHECK-NEXT:    v_readfirstlane_b32 s20, v4
 ; CHECK-NEXT:    v_readfirstlane_b32 s7, v5
 ; CHECK-NEXT:    s_mov_b32 s10, s20
 ; CHECK-NEXT:    s_mov_b32 s11, s7
-; CHECK-NEXT:    v_writelane_b32 v15, s10, 11
-; CHECK-NEXT:    v_writelane_b32 v15, s11, 12
+; CHECK-NEXT:    v_writelane_b32 v15, s10, 9
+; CHECK-NEXT:    v_writelane_b32 v15, s11, 10
 ; CHECK-NEXT:    v_cmp_eq_u64_e64 s4, s[10:11], v[4:5]
 ; CHECK-NEXT:    s_and_b32 s9, s4, s5
 ; CHECK-NEXT:    v_readfirstlane_b32 s6, v2
 ; CHECK-NEXT:    v_readfirstlane_b32 s5, v3
 ; CHECK-NEXT:    s_mov_b32 s10, s6
 ; CHECK-NEXT:    s_mov_b32 s11, s5
-; CHECK-NEXT:    v_writelane_b32 v15, s10, 13
-; CHECK-NEXT:    v_writelane_b32 v15, s11, 14
+; CHECK-NEXT:    v_writelane_b32 v15, s10, 11
+; CHECK-NEXT:    v_writelane_b32 v15, s11, 12
 ; CHECK-NEXT:    v_cmp_eq_u64_e64 s4, s[10:11], v[2:3]
 ; CHECK-NEXT:    s_and_b32 s4, s4, s9
 ; CHECK-NEXT:    ; kill: def $sgpr8 killed $sgpr8 def $sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -124,14 +122,14 @@ define <4 x float> @waterfall_loop(<8 x i32> %vgpr_srd) {
 ; CHECK-NEXT:    s_mov_b32 s13, s7
 ; CHECK-NEXT:    s_mov_b32 s14, s6
 ; CHECK-NEXT:    s_mov_b32 s15, s5
-; CHECK-NEXT:    v_writelane_b32 v15, s8, 15
-; CHECK-NEXT:    v_writelane_b32 v15, s9, 16
-; CHECK-NEXT:    v_writelane_b32 v15, s10, 17
-; CHECK-NEXT:    v_writelane_b32 v15, s11, 18
-; CHECK-NEXT:    v_writelane_b32 v15, s12, 19
-; CHECK-NEXT:    v_writelane_b32 v15, s13, 20
-; CHECK-NEXT:    v_writelane_b32 v15, s14, 21
-; CHECK-NEXT:    v_writelane_b32 v15, s15, 22
+; CHECK-NEXT:    v_writelane_b32 v15, s8, 13
+; CHECK-NEXT:    v_writelane_b32 v15, s9, 14
+; CHECK-NEXT:    v_writelane_b32 v15, s10, 15
+; CHECK-NEXT:    v_writelane_b32 v15, s11, 16
+; CHECK-NEXT:    v_writelane_b32 v15, s12, 17
+; CHECK-NEXT:    v_writelane_b32 v15, s13, 18
+; CHECK-NEXT:    v_writelane_b32 v15, s14, 19
+; CHECK-NEXT:    v_writelane_b32 v15, s15, 20
 ; CHECK-NEXT:    s_and_saveexec_b32 s4, s4
 ; CHECK-NEXT:    image_sample v0, v[0:1], s[8:15], s[16:19] dmask:0x1 dim:SQ_RSRC_IMG_2D
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
@@ -139,22 +137,20 @@ define <4 x float> @waterfall_loop(<8 x i32> %vgpr_srd) {
 ; CHECK-NEXT:    s_xor_b32 exec_lo, exec_lo, s4
 ; CHECK-NEXT:    s_cbranch_execnz .LBB0_1
 ; CHECK-NEXT:  ; %bb.2:
-; CHECK-NEXT:    v_readlane_b32 s4, v15, 6
+; CHECK-NEXT:    v_readlane_b32 s4, v15, 4
 ; CHECK-NEXT:    s_mov_b32 exec_lo, s4
 ; CHECK-NEXT:  ; %bb.3:
-; CHECK-NEXT:    v_readlane_b32 s4, v15, 0
-; CHECK-NEXT:    v_readlane_b32 s5, v15, 1
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
-; CHECK-NEXT:    ; implicit-def: $sgpr6
-; CHECK-NEXT:    v_mov_b32_e32 v1, s6
-; CHECK-NEXT:    v_mov_b32_e32 v2, s6
-; CHECK-NEXT:    v_mov_b32_e32 v3, s6
-; CHECK-NEXT:    s_or_saveexec_b32 s6, -1
+; CHECK-NEXT:    ; implicit-def: $sgpr4
+; CHECK-NEXT:    v_mov_b32_e32 v1, s4
+; CHECK-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-NEXT:    v_mov_b32_e32 v3, s4
+; CHECK-NEXT:    s_or_saveexec_b32 s4, -1
 ; CHECK-NEXT:    buffer_load_dword v15, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_mov_b32 exec_lo, s6
+; CHECK-NEXT:    s_mov_b32 exec_lo, s4
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    s_waitcnt_vscnt null, 0x0
-; CHECK-NEXT:    s_setpc_b64 s[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
 bb:
   %ret = tail call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 1, float 0.000000e+00, float 0.000000e+00, <8 x i32> %vgpr_srd, <4 x i32> zeroinitializer, i1 false, i32 0, i32 0)
   ret <4 x float> %ret

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll
index 2d3ec011220fb..15b92df2f1601 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll
@@ -39,9 +39,7 @@ define i64 @return_type_is_too_big_scalar() {
   ; CHECK-LABEL: name: return_type_is_too_big_scalar
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $vgpr8
@@ -52,15 +50,11 @@ define i64 @return_type_is_too_big_scalar() {
 define i32 @return_type_is_too_small_scalar() {
   ; CHECK-LABEL: name: return_type_is_too_small_scalar
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $vgpr8_vgpr9
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr8_vgpr9
-  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr8_vgpr9
+  ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[TRUNC]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %reg = call i32 asm sideeffect "; def $0", "={v[8:9]}" ()
   ret i32 %reg
 }
@@ -69,9 +63,7 @@ define i8 addrspace(1)* @return_type_is_too_big_pointer() {
   ; CHECK-LABEL: name: return_type_is_too_big_pointer
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $vgpr8
@@ -83,9 +75,7 @@ define i8 addrspace(3)* @return_type_is_too_small_pointer() {
   ; CHECK-LABEL: name: return_type_is_too_small_pointer
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $vgpr8_vgpr9
@@ -96,7 +86,7 @@ define i8 addrspace(3)* @return_type_is_too_small_pointer() {
 define void @use_vector_too_small(<8 x i32> %arg) {
   ; CHECK-LABEL: name: use_vector_too_small
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -107,11 +97,9 @@ define void @use_vector_too_small(<8 x i32> %arg) {
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<8 x s32>)
   ; CHECK-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9 /* reguse */, $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]]
+  ; CHECK-NEXT:   SI_RETURN
   call void asm sideeffect "; use $0", "{v[0:7]}"(<8 x i32> %arg)
   ret void
 }
@@ -120,7 +108,7 @@ define void @use_vector_too_big(<8 x i32> %arg) {
   ; CHECK-LABEL: name: use_vector_too_big
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -131,7 +119,6 @@ define void @use_vector_too_big(<8 x i32> %arg) {
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1 (%ir-block.0):
   call void asm sideeffect "; use $0", "{v[0:9]}"(<8 x i32> %arg)
@@ -142,12 +129,11 @@ define void @use_scalar_too_small(i64 %arg) {
   ; CHECK-LABEL: name: use_scalar_too_small
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31, $vgpr0, $vgpr1
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1 (%ir-block.0):
   call void asm sideeffect "; use $0", "{v0}"(i64 %arg)
@@ -157,15 +143,13 @@ define void @use_scalar_too_small(i64 %arg) {
 define void @use_scalar_too_big(i32 %arg) {
   ; CHECK-LABEL: name: use_scalar_too_big
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s32)
   ; CHECK-NEXT:   $vgpr0_vgpr1 = COPY [[ANYEXT]](s64)
   ; CHECK-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9 /* reguse */, $vgpr0_vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   call void asm sideeffect "; use $0", "{v[0:1]}"(i32 %arg)
   ret void
 }
@@ -174,12 +158,11 @@ define void @use_pointer_too_small(i8 addrspace(1)* %arg) {
   ; CHECK-LABEL: name: use_pointer_too_small
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31, $vgpr0, $vgpr1
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1 (%ir-block.0):
   call void asm sideeffect "; use $0", "{v0}"(i8 addrspace(1)* %arg)
@@ -190,10 +173,9 @@ define void @use_pointer_too_big(i32 addrspace(3)* %arg) {
   ; CHECK-LABEL: name: use_pointer_too_big
   ; CHECK: bb.0:
   ; CHECK-NEXT:   successors: %bb.1(0x80000000)
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31, $vgpr0
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.1 (%ir-block.0):
   call void asm sideeffect "; use $0", "{v[0:1]}"(i32 addrspace(3)* %arg)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll
index 7124c8c700481..640314d522630 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll
@@ -5,16 +5,14 @@
 define void @arg_align_8(i8 addrspace(1)* align 8 %arg0) {
   ; CHECK-LABEL: name: arg_align_8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
   ; CHECK-NEXT:   G_STORE [[C]](s8), [[MV]](p1) :: (store (s8) into %ir.arg0, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store i8 0, i8 addrspace(1)* %arg0, align 8
   ret void
 }
@@ -25,7 +23,7 @@ declare align 8 i8 addrspace(1)* @returns_ptr_align8()
 define void @call_result_align_1() {
   ; CHECK-LABEL: name: call_result_align_1
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -35,36 +33,34 @@ define void @call_result_align_1() {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
+  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY17]](s32), [[COPY18]](s32)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
   ; CHECK-NEXT:   G_STORE [[C]](s8), [[MV]](p1) :: (store (s8) into %ir.ptr, addrspace 1)
-  ; CHECK-NEXT:   [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY20]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr = call align 1 i8 addrspace(1)* @returns_ptr()
   store i8 0, i8 addrspace(1)* %ptr, align 1
   ret void
@@ -73,7 +69,7 @@ define void @call_result_align_1() {
 define void @call_result_align_8() {
   ; CHECK-LABEL: name: call_result_align_8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -83,37 +79,35 @@ define void @call_result_align_8() {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
+  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY17]](s32), [[COPY18]](s32)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[ASSERT_ALIGN:%[0-9]+]]:_(p1) = G_ASSERT_ALIGN [[MV]], 8
   ; CHECK-NEXT:   G_STORE [[C]](s8), [[ASSERT_ALIGN]](p1) :: (store (s8) into %ir.ptr, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY20]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr = call align 8 i8 addrspace(1)* @returns_ptr()
   store i8 0, i8 addrspace(1)* %ptr, align 8
   ret void
@@ -122,7 +116,7 @@ define void @call_result_align_8() {
 define void @declaration_result_align_8() {
   ; CHECK-LABEL: name: declaration_result_align_8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -132,37 +126,35 @@ define void @declaration_result_align_8() {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr_align8
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr_align8, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
+  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY17]](s32), [[COPY18]](s32)
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[ASSERT_ALIGN:%[0-9]+]]:_(p1) = G_ASSERT_ALIGN [[MV]], 8
   ; CHECK-NEXT:   G_STORE [[C]](s8), [[ASSERT_ALIGN]](p1) :: (store (s8) into %ir.ptr, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY20]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr = call i8 addrspace(1)* @returns_ptr_align8()
   store i8 0, i8 addrspace(1)* %ptr, align 8
   ret void
@@ -171,7 +163,7 @@ define void @declaration_result_align_8() {
 define i8 addrspace(1)* @tail_call_assert_align() {
   ; CHECK-LABEL: name: tail_call_assert_align
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -181,26 +173,25 @@ define i8 addrspace(1)* @tail_call_assert_align() {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @returns_ptr_align8
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   SI_TCRETURN [[GV]](p0), @returns_ptr_align8, 0, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %call = tail call i8 addrspace(1)* @returns_ptr_align8()

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-atomicrmw.ll
index 9ed3ec23a181e..5206284cf5723 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-atomicrmw.ll
@@ -4,15 +4,13 @@
 define float @test_atomicrmw_fadd(float addrspace(3)* %addr) {
   ; CHECK-LABEL: name: test_atomicrmw_fadd
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
   ; CHECK-NEXT:   [[ATOMICRMW_FADD:%[0-9]+]]:_(s32) = G_ATOMICRMW_FADD [[COPY]](p3), [[C]] :: (load store seq_cst (s32) on %ir.addr, addrspace 3)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ATOMICRMW_FADD]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %oldval = atomicrmw fadd float addrspace(3)* %addr, float 1.0 seq_cst
   ret float %oldval
 }
@@ -21,10 +19,9 @@ define float @test_atomicrmw_fsub(float addrspace(3)* %addr) {
   ; CHECK-LABEL: name: test_atomicrmw_fsub
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK-NEXT:   successors: %bb.2(0x80000000)
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p3) :: (load (s32) from %ir.addr, addrspace 3)
@@ -33,8 +30,8 @@ define float @test_atomicrmw_fsub(float addrspace(3)* %addr) {
   ; CHECK-NEXT: bb.2.atomicrmw.start:
   ; CHECK-NEXT:   successors: %bb.3(0x40000000), %bb.2(0x40000000)
   ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %9(s64), %bb.2, [[C1]](s64), %bb.1
-  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[LOAD]](s32), %bb.1, %7(s32), %bb.2
+  ; CHECK-NEXT:   [[PHI:%[0-9]+]]:_(s64) = G_PHI %8(s64), %bb.2, [[C1]](s64), %bb.1
+  ; CHECK-NEXT:   [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[LOAD]](s32), %bb.1, %6(s32), %bb.2
   ; CHECK-NEXT:   [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[PHI1]], [[C]]
   ; CHECK-NEXT:   [[ATOMIC_CMPXCHG_WITH_SUCCESS:%[0-9]+]]:_(s32), [[ATOMIC_CMPXCHG_WITH_SUCCESS1:%[0-9]+]]:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS [[COPY]](p3), [[PHI1]], [[FSUB]] :: (load store seq_cst seq_cst (s32) on %ir.2, addrspace 3)
   ; CHECK-NEXT:   [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.if.break), [[ATOMIC_CMPXCHG_WITH_SUCCESS1]](s1), [[PHI]](s64)
@@ -47,8 +44,7 @@ define float @test_atomicrmw_fsub(float addrspace(3)* %addr) {
   ; CHECK-NEXT:   [[PHI3:%[0-9]+]]:_(s64) = G_PHI [[INT]](s64), %bb.2
   ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI3]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[PHI2]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %oldval = atomicrmw fsub float addrspace(3)* %addr, float 1.0 seq_cst
   ret float %oldval
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll
index 2e647da9b5df3..7015bd8f9c965 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-abi-attribute-hints.ll
@@ -125,7 +125,7 @@ define amdgpu_kernel void @kernel_call_no_other_sgprs() {
 define void @func_call_no_workitem_ids() {
   ; CHECK-LABEL: name: func_call_no_workitem_ids
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_32 = COPY $sgpr14
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr13
@@ -134,29 +134,27 @@ define void @func_call_no_workitem_ids() {
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @extern
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY15]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(s64) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY14]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY7]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY10]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY11]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY13]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @extern, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY7]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY16]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @extern() "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z"
   ret void
 }
@@ -164,32 +162,30 @@ define void @func_call_no_workitem_ids() {
 define void @func_call_no_workgroup_ids() {
   ; CHECK-LABEL: name: func_call_no_workgroup_ids
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @extern
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(p4) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(p4) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(s64) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY11]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY6]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY7]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY8]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY9]](s64)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY10]](s32)
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p4) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(p4) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(p4) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(s64) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY10]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY5]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY6]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY7]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY8]](s64)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY9]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @extern, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY5]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY12]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @extern() "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z"
   ret void
 }
@@ -197,23 +193,21 @@ define void @func_call_no_workgroup_ids() {
 define void @func_call_no_other_sgprs() {
   ; CHECK-LABEL: name: func_call_no_other_sgprs
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr31, $sgpr8_sgpr9, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr31, $sgpr8_sgpr9
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @extern
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p4) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY5]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY3]](p4)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY4]](s32)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p4) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY4]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY2]](p4)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY3]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @extern, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr8_sgpr9, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY6]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @extern() "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z"
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
index b641b5872bf88..c04b66db03c8d 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-implicit-args.ll
@@ -116,7 +116,7 @@ define amdgpu_kernel void @test_call_external_void_func_i32([17 x i8]) #0 {
 define void @test_func_call_external_void_func_i32() #0 {
   ; GFX900-LABEL: name: test_func_call_external_void_func_i32
   ; GFX900: bb.1 (%ir-block.0):
-  ; GFX900-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX900-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GFX900-NEXT: {{  $}}
   ; GFX900-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GFX900-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -126,36 +126,34 @@ define void @test_func_call_external_void_func_i32() #0 {
   ; GFX900-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; GFX900-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; GFX900-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; GFX900-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX900-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 99
   ; GFX900-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GFX900-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_i32
-  ; GFX900-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GFX900-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GFX900-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GFX900-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GFX900-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GFX900-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GFX900-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GFX900-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX900-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX900-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX900-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX900-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX900-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GFX900-NEXT:   $vgpr0 = COPY [[C]](s32)
-  ; GFX900-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GFX900-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; GFX900-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; GFX900-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; GFX900-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; GFX900-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; GFX900-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; GFX900-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; GFX900-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; GFX900-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; GFX900-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX900-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; GFX900-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; GFX900-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; GFX900-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; GFX900-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; GFX900-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; GFX900-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; GFX900-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; GFX900-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; GFX900-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; GFX900-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; GFX900-NEXT:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; GFX900-NEXT:   S_SETPC_B64_return [[COPY18]]
+  ; GFX900-NEXT:   SI_RETURN
   ; GFX908-LABEL: name: test_func_call_external_void_func_i32
   ; GFX908: bb.1 (%ir-block.0):
-  ; GFX908-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX908-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GFX908-NEXT: {{  $}}
   ; GFX908-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GFX908-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -165,33 +163,31 @@ define void @test_func_call_external_void_func_i32() #0 {
   ; GFX908-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; GFX908-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; GFX908-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; GFX908-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX908-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 99
   ; GFX908-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GFX908-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_i32
-  ; GFX908-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GFX908-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GFX908-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GFX908-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GFX908-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GFX908-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GFX908-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GFX908-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX908-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX908-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX908-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX908-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX908-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GFX908-NEXT:   $vgpr0 = COPY [[C]](s32)
-  ; GFX908-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GFX908-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; GFX908-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; GFX908-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; GFX908-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; GFX908-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; GFX908-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; GFX908-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; GFX908-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; GFX908-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; GFX908-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX908-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; GFX908-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; GFX908-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; GFX908-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; GFX908-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; GFX908-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; GFX908-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; GFX908-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; GFX908-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; GFX908-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; GFX908-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; GFX908-NEXT:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; GFX908-NEXT:   S_SETPC_B64_return [[COPY18]]
+  ; GFX908-NEXT:   SI_RETURN
   call void @external_void_func_i32(i32 99)
   ret void
 }
@@ -378,7 +374,7 @@ define amdgpu_kernel void @test_call_external_void_func_v32i32([17 x i8]) #0 {
 define void @test_func_call_external_void_func_v32i32([17 x i8]) #0 {
   ; GFX900-LABEL: name: test_func_call_external_void_func_v32i32
   ; GFX900: bb.1 (%ir-block.1):
-  ; GFX900-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX900-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GFX900-NEXT: {{  $}}
   ; GFX900-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GFX900-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -439,19 +435,18 @@ define void @test_func_call_external_void_func_v32i32([17 x i8]) #0 {
   ; GFX900-NEXT:   [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr16
   ; GFX900-NEXT:   [[TRUNC32:%[0-9]+]]:_(s16) = G_TRUNC [[COPY24]](s32)
   ; GFX900-NEXT:   [[TRUNC33:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC32]](s16)
-  ; GFX900-NEXT:   [[COPY25:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX900-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GFX900-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
   ; GFX900-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GFX900-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_v32i32
-  ; GFX900-NEXT:   [[COPY26:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GFX900-NEXT:   [[COPY27:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GFX900-NEXT:   [[COPY28:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GFX900-NEXT:   [[COPY29:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GFX900-NEXT:   [[COPY30:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GFX900-NEXT:   [[COPY31:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GFX900-NEXT:   [[COPY32:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GFX900-NEXT:   [[COPY33:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX900-NEXT:   [[COPY25:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX900-NEXT:   [[COPY26:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX900-NEXT:   [[COPY27:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX900-NEXT:   [[COPY28:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX900-NEXT:   [[COPY29:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX900-NEXT:   [[COPY30:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX900-NEXT:   [[COPY31:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX900-NEXT:   [[COPY32:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GFX900-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<32 x s32>)
   ; GFX900-NEXT:   [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sgpr32
   ; GFX900-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -488,23 +483,22 @@ define void @test_func_call_external_void_func_v32i32([17 x i8]) #0 {
   ; GFX900-NEXT:   $vgpr28 = COPY [[UV28]](s32)
   ; GFX900-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; GFX900-NEXT:   $vgpr30 = COPY [[UV30]](s32)
-  ; GFX900-NEXT:   [[COPY34:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GFX900-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY34]](<4 x s32>)
-  ; GFX900-NEXT:   $sgpr4_sgpr5 = COPY [[COPY26]](p4)
-  ; GFX900-NEXT:   $sgpr6_sgpr7 = COPY [[COPY27]](p4)
-  ; GFX900-NEXT:   $sgpr8_sgpr9 = COPY [[COPY28]](p4)
-  ; GFX900-NEXT:   $sgpr10_sgpr11 = COPY [[COPY29]](s64)
-  ; GFX900-NEXT:   $sgpr12 = COPY [[COPY30]](s32)
-  ; GFX900-NEXT:   $sgpr13 = COPY [[COPY31]](s32)
-  ; GFX900-NEXT:   $sgpr14 = COPY [[COPY32]](s32)
-  ; GFX900-NEXT:   $vgpr31 = COPY [[COPY33]](s32)
+  ; GFX900-NEXT:   [[COPY33:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX900-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY33]](<4 x s32>)
+  ; GFX900-NEXT:   $sgpr4_sgpr5 = COPY [[COPY25]](p4)
+  ; GFX900-NEXT:   $sgpr6_sgpr7 = COPY [[COPY26]](p4)
+  ; GFX900-NEXT:   $sgpr8_sgpr9 = COPY [[COPY27]](p4)
+  ; GFX900-NEXT:   $sgpr10_sgpr11 = COPY [[COPY28]](s64)
+  ; GFX900-NEXT:   $sgpr12 = COPY [[COPY29]](s32)
+  ; GFX900-NEXT:   $sgpr13 = COPY [[COPY30]](s32)
+  ; GFX900-NEXT:   $sgpr14 = COPY [[COPY31]](s32)
+  ; GFX900-NEXT:   $vgpr31 = COPY [[COPY32]](s32)
   ; GFX900-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_v32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; GFX900-NEXT:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
-  ; GFX900-NEXT:   [[COPY35:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY25]]
-  ; GFX900-NEXT:   S_SETPC_B64_return [[COPY35]]
+  ; GFX900-NEXT:   SI_RETURN
   ; GFX908-LABEL: name: test_func_call_external_void_func_v32i32
   ; GFX908: bb.1 (%ir-block.1):
-  ; GFX908-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GFX908-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GFX908-NEXT: {{  $}}
   ; GFX908-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GFX908-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -565,19 +559,18 @@ define void @test_func_call_external_void_func_v32i32([17 x i8]) #0 {
   ; GFX908-NEXT:   [[COPY24:%[0-9]+]]:_(s32) = COPY $vgpr16
   ; GFX908-NEXT:   [[TRUNC32:%[0-9]+]]:_(s16) = G_TRUNC [[COPY24]](s32)
   ; GFX908-NEXT:   [[TRUNC33:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC32]](s16)
-  ; GFX908-NEXT:   [[COPY25:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX908-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GFX908-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
   ; GFX908-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GFX908-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_v32i32
-  ; GFX908-NEXT:   [[COPY26:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GFX908-NEXT:   [[COPY27:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GFX908-NEXT:   [[COPY28:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GFX908-NEXT:   [[COPY29:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GFX908-NEXT:   [[COPY30:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GFX908-NEXT:   [[COPY31:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GFX908-NEXT:   [[COPY32:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GFX908-NEXT:   [[COPY33:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GFX908-NEXT:   [[COPY25:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GFX908-NEXT:   [[COPY26:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GFX908-NEXT:   [[COPY27:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GFX908-NEXT:   [[COPY28:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GFX908-NEXT:   [[COPY29:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GFX908-NEXT:   [[COPY30:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GFX908-NEXT:   [[COPY31:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GFX908-NEXT:   [[COPY32:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GFX908-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32), [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32), [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32), [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32), [[UV20:%[0-9]+]]:_(s32), [[UV21:%[0-9]+]]:_(s32), [[UV22:%[0-9]+]]:_(s32), [[UV23:%[0-9]+]]:_(s32), [[UV24:%[0-9]+]]:_(s32), [[UV25:%[0-9]+]]:_(s32), [[UV26:%[0-9]+]]:_(s32), [[UV27:%[0-9]+]]:_(s32), [[UV28:%[0-9]+]]:_(s32), [[UV29:%[0-9]+]]:_(s32), [[UV30:%[0-9]+]]:_(s32), [[UV31:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<32 x s32>)
   ; GFX908-NEXT:   [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sgpr32
   ; GFX908-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
@@ -614,20 +607,19 @@ define void @test_func_call_external_void_func_v32i32([17 x i8]) #0 {
   ; GFX908-NEXT:   $vgpr28 = COPY [[UV28]](s32)
   ; GFX908-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; GFX908-NEXT:   $vgpr30 = COPY [[UV30]](s32)
-  ; GFX908-NEXT:   [[COPY34:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GFX908-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY34]](<4 x s32>)
-  ; GFX908-NEXT:   $sgpr4_sgpr5 = COPY [[COPY26]](p4)
-  ; GFX908-NEXT:   $sgpr6_sgpr7 = COPY [[COPY27]](p4)
-  ; GFX908-NEXT:   $sgpr8_sgpr9 = COPY [[COPY28]](p4)
-  ; GFX908-NEXT:   $sgpr10_sgpr11 = COPY [[COPY29]](s64)
-  ; GFX908-NEXT:   $sgpr12 = COPY [[COPY30]](s32)
-  ; GFX908-NEXT:   $sgpr13 = COPY [[COPY31]](s32)
-  ; GFX908-NEXT:   $sgpr14 = COPY [[COPY32]](s32)
-  ; GFX908-NEXT:   $vgpr31 = COPY [[COPY33]](s32)
+  ; GFX908-NEXT:   [[COPY33:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GFX908-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY33]](<4 x s32>)
+  ; GFX908-NEXT:   $sgpr4_sgpr5 = COPY [[COPY25]](p4)
+  ; GFX908-NEXT:   $sgpr6_sgpr7 = COPY [[COPY26]](p4)
+  ; GFX908-NEXT:   $sgpr8_sgpr9 = COPY [[COPY27]](p4)
+  ; GFX908-NEXT:   $sgpr10_sgpr11 = COPY [[COPY28]](s64)
+  ; GFX908-NEXT:   $sgpr12 = COPY [[COPY29]](s32)
+  ; GFX908-NEXT:   $sgpr13 = COPY [[COPY30]](s32)
+  ; GFX908-NEXT:   $sgpr14 = COPY [[COPY31]](s32)
+  ; GFX908-NEXT:   $vgpr31 = COPY [[COPY32]](s32)
   ; GFX908-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_v32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; GFX908-NEXT:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
-  ; GFX908-NEXT:   [[COPY35:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY25]]
-  ; GFX908-NEXT:   S_SETPC_B64_return [[COPY35]]
+  ; GFX908-NEXT:   SI_RETURN
   call void @external_void_func_v32i32(<32 x i32> zeroinitializer)
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll
index 67cb7683fbf5b..cb06c47525207 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-non-fixed.ll
@@ -11,17 +11,13 @@ declare hidden amdgpu_gfx void @external_gfx_void_func_struct_i8_i32_inreg({ i8,
 define amdgpu_gfx void @test_gfx_call_external_void_func_void() #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_void
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_void, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   call amdgpu_gfx void @external_gfx_void_func_void()
   ret void
 }
@@ -29,20 +25,18 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_void() #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm(i32) #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_i32_imm
   ; CHECK: bb.1 (%ir-block.1):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_i32
   ; CHECK-NEXT:   $vgpr0 = COPY [[C]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_i32, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   call amdgpu_gfx void @external_gfx_void_func_i32(i32 42)
   ret void
 }
@@ -50,20 +44,18 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm(i32) #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm_inreg(i32 inreg) #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_i32_imm_inreg
   ; CHECK: bb.1 (%ir-block.1):
-  ; CHECK-NEXT:   liveins: $sgpr4, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr4
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr4
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_i32_inreg
   ; CHECK-NEXT:   $sgpr4 = COPY [[C]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_i32_inreg, csr_amdgpu_si_gfx, implicit $sgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   call amdgpu_gfx void @external_gfx_void_func_i32_inreg(i32 inreg 42)
   ret void
 }
@@ -71,9 +63,6 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm_inreg(i32 inreg
 define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_struct_i8_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load (p1) from `{ i8, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
@@ -86,12 +75,11 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
   ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT1]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[LOAD2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_struct_i8_i32, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr0 = load { i8, i32 } addrspace(1)*, { i8, i32 } addrspace(1)* addrspace(4)* undef
   %val = load { i8, i32 }, { i8, i32 } addrspace(1)* %ptr0
   call amdgpu_gfx void @external_gfx_void_func_struct_i8_i32({ i8, i32 } %val)
@@ -101,9 +89,6 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32_inreg() #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_struct_i8_i32_inreg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load (p1) from `{ i8, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
@@ -116,12 +101,11 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32_inreg() #
   ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
   ; CHECK-NEXT:   $sgpr4 = COPY [[ANYEXT1]](s32)
   ; CHECK-NEXT:   $sgpr5 = COPY [[LOAD2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_struct_i8_i32_inreg, csr_amdgpu_si_gfx, implicit $sgpr4, implicit $sgpr5, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr0 = load { i8, i32 } addrspace(1)*, { i8, i32 } addrspace(1)* addrspace(4)* undef
   %val = load { i8, i32 }, { i8, i32 } addrspace(1)* %ptr0
   call amdgpu_gfx void @external_gfx_void_func_struct_i8_i32_inreg({ i8, i32 } inreg %val)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll
index 47461bf5e17d3..cdc7568b3e0c8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call-return-values.ll
@@ -129,24 +129,22 @@ define amdgpu_kernel void @test_call_external_i32_func_i32_imm(i32 addrspace(1)*
 define amdgpu_gfx void @test_gfx_call_external_i32_func_i32_imm(i32 addrspace(1)* %out) #0 {
   ; GCN-LABEL: name: test_gfx_call_external_i32_func_i32_imm
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_i32_func_i32
   ; GCN-NEXT:   $vgpr0 = COPY [[C]](s32)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_i32_func_i32, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; GCN-NEXT:   G_STORE [[COPY4]](s32), [[MV]](p1) :: (volatile store (s32) into %ir.out, addrspace 1)
-  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY2]]
-  ; GCN-NEXT:   S_SETPC_B64_return_gfx [[COPY5]]
+  ; GCN-NEXT:   G_STORE [[COPY3]](s32), [[MV]](p1) :: (volatile store (s32) into %ir.out, addrspace 1)
+  ; GCN-NEXT:   SI_RETURN
   %val = call amdgpu_gfx i32 @external_gfx_i32_func_i32(i32 42)
   store volatile i32 %val, i32 addrspace(1)* %out
   ret void
@@ -212,21 +210,17 @@ define amdgpu_kernel void @test_call_external_i1_func_void() #0 {
 define amdgpu_gfx void @test_gfx_call_external_i1_func_void() #0 {
   ; GCN-LABEL: name: test_gfx_call_external_i1_func_void
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $sgpr30_sgpr31
-  ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_i1_func_void
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_i1_func_void, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; GCN-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32)
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32)
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
   ; GCN-NEXT:   G_STORE [[TRUNC]](s1), [[DEF]](p1) :: (volatile store (s1) into `i1 addrspace(1)* undef`, addrspace 1)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN-NEXT:   S_SETPC_B64_return_gfx [[COPY3]]
+  ; GCN-NEXT:   SI_RETURN
   %val = call amdgpu_gfx i1 @external_gfx_i1_func_void()
   store volatile i1 %val, i1 addrspace(1)* undef
   ret void
@@ -411,22 +405,18 @@ define amdgpu_kernel void @test_call_external_i8_func_void() #0 {
 define amdgpu_gfx void @test_gfx_call_external_i8_func_void() #0 {
   ; GCN-LABEL: name: test_gfx_call_external_i8_func_void
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $sgpr30_sgpr31
-  ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_i8_func_void
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_i8_func_void, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; GCN-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
   ; GCN-NEXT:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC]](s16)
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
   ; GCN-NEXT:   G_STORE [[TRUNC1]](s8), [[DEF]](p1) :: (volatile store (s8) into `i8 addrspace(1)* undef`, addrspace 1)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN-NEXT:   S_SETPC_B64_return_gfx [[COPY3]]
+  ; GCN-NEXT:   SI_RETURN
   %val = call amdgpu_gfx i8 @external_gfx_i8_func_void()
   store volatile i8 %val, i8 addrspace(1)* undef
   ret void
@@ -786,20 +776,16 @@ define amdgpu_kernel void @test_call_external_i32_func_void() #0 {
 define amdgpu_gfx void @test_gfx_call_external_i32_func_void() #0 {
   ; GCN-LABEL: name: test_gfx_call_external_i32_func_void
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $sgpr30_sgpr31
-  ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_i32_func_void
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_i32_func_void, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; GCN-NEXT:   G_STORE [[COPY2]](s32), [[DEF]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN-NEXT:   S_SETPC_B64_return_gfx [[COPY3]]
+  ; GCN-NEXT:   G_STORE [[COPY1]](s32), [[DEF]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GCN-NEXT:   SI_RETURN
   %val = call amdgpu_gfx i32 @external_gfx_i32_func_void()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
@@ -2509,25 +2495,21 @@ define amdgpu_kernel void @test_call_external_i32_i64_func_void() #0 {
 define amdgpu_gfx void @test_gfx_call_external_i32_i64_func_void() #0 {
   ; GCN-LABEL: name: test_gfx_call_external_i32_i64_func_void
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $sgpr30_sgpr31
-  ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_i32_i64_func_void
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_i32_i64_func_void, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0, implicit-def $vgpr1, implicit-def $vgpr2
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; GCN-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
+  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr2
+  ; GCN-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY3]](s32), [[COPY4]](s32)
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; GCN-NEXT:   G_STORE [[COPY3]](s32), [[DEF]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; GCN-NEXT:   G_STORE [[MV]](s64), [[COPY1]](p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
-  ; GCN-NEXT:   [[COPY6:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN-NEXT:   S_SETPC_B64_return_gfx [[COPY6]]
+  ; GCN-NEXT:   G_STORE [[COPY2]](s32), [[DEF]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+  ; GCN-NEXT:   G_STORE [[MV]](s64), [[COPY]](p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
+  ; GCN-NEXT:   SI_RETURN
   %val = call amdgpu_gfx { i32, i64 } @external_gfx_i32_i64_func_void()
   %val.0 = extractvalue { i32, i64 } %val, 0
   %val.1 = extractvalue { i32, i64 } %val, 1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
index f9a83e6f3ab3e..8566590adcd32 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-call.ll
@@ -149,17 +149,13 @@ define amdgpu_kernel void @test_call_external_void_func_void() #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_void() #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_void
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_void, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   call amdgpu_gfx void @external_gfx_void_func_void()
   ret void
 }
@@ -167,7 +163,7 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_void() #0 {
 define void @test_func_call_external_void_func_void() #0 {
   ; CHECK-LABEL: name: test_func_call_external_void_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -177,31 +173,29 @@ define void @test_func_call_external_void_func_void() #0 {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_void
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_void, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY18]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @external_void_func_void()
   ret void
 }
@@ -889,20 +883,18 @@ define amdgpu_kernel void @test_call_external_void_func_i32_imm(i32) #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm(i32) #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_i32_imm
   ; CHECK: bb.1 (%ir-block.1):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_i32
   ; CHECK-NEXT:   $vgpr0 = COPY [[C]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_i32, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   call amdgpu_gfx void @external_gfx_void_func_i32(i32 42)
   ret void
 }
@@ -910,20 +902,18 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm(i32) #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_i32_imm_inreg(i32 inreg) #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_i32_imm_inreg
   ; CHECK: bb.1 (%ir-block.1):
-  ; CHECK-NEXT:   liveins: $sgpr4, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr4
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr4
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 42
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_gfx_void_func_i32_inreg
   ; CHECK-NEXT:   $sgpr4 = COPY [[C]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_i32_inreg, csr_amdgpu_si_gfx, implicit $sgpr4, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   call amdgpu_gfx void @external_gfx_void_func_i32_inreg(i32 inreg 42)
   ret void
 }
@@ -3882,9 +3872,6 @@ define amdgpu_kernel void @test_call_external_void_func_struct_i8_i32() #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_struct_i8_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load (p1) from `{ i8, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
@@ -3897,12 +3884,11 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
   ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT1]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[LOAD2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_struct_i8_i32, csr_amdgpu_si_gfx, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr0 = load { i8, i32 } addrspace(1)*, { i8, i32 } addrspace(1)* addrspace(4)* undef
   %val = load { i8, i32 }, { i8, i32 } addrspace(1)* %ptr0
   call amdgpu_gfx void @external_gfx_void_func_struct_i8_i32({ i8, i32 } %val)
@@ -3912,9 +3898,6 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32() #0 {
 define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32_inreg() #0 {
   ; CHECK-LABEL: name: test_gfx_call_external_void_func_struct_i8_i32_inreg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p4) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[DEF]](p4) :: (load (p1) from `{ i8, i32 } addrspace(1)* addrspace(4)* undef`, addrspace 4)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p1) :: (load (s8) from %ir.ptr0, align 4, addrspace 1)
@@ -3927,12 +3910,11 @@ define amdgpu_gfx void @test_gfx_call_external_void_func_struct_i8_i32_inreg() #
   ; CHECK-NEXT:   [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ANYEXT]](s16)
   ; CHECK-NEXT:   $sgpr4 = COPY [[ANYEXT1]](s32)
   ; CHECK-NEXT:   $sgpr5 = COPY [[LOAD2]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY1]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_gfx_void_func_struct_i8_i32_inreg, csr_amdgpu_si_gfx, implicit $sgpr4, implicit $sgpr5, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ptr0 = load { i8, i32 } addrspace(1)*, { i8, i32 } addrspace(1)* addrspace(4)* undef
   %val = load { i8, i32 }, { i8, i32 } addrspace(1)* %ptr0
   call amdgpu_gfx void @external_gfx_void_func_struct_i8_i32_inreg({ i8, i32 } inreg %val)
@@ -4013,7 +3995,7 @@ declare void @void_func_byval_a3i32_byval_i8_align32([3 x i32] addrspace(5)* byv
 define void @call_byval_3ai32_byval_i8_align32([3 x i32] addrspace(5)* %incoming0, i8 addrspace(5)* align 32 %incoming1) #0 {
   ; CHECK-LABEL: name: call_byval_3ai32_byval_i8_align32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -4025,18 +4007,17 @@ define void @call_byval_3ai32_byval_i8_align32([3 x i32] addrspace(5)* %incoming
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p5) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p5) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 999
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @void_func_byval_a3i32_byval_i8_align32
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sgpr32
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C1]](s32)
@@ -4047,20 +4028,19 @@ define void @call_byval_3ai32_byval_i8_align32([3 x i32] addrspace(5)* %incoming
   ; CHECK-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   G_MEMCPY [[PTR_ADD1]](p5), [[COPY9]](p5), [[C4]](s32), 0 :: (dereferenceable store (s8) into stack + 32, align 32, addrspace 5), (dereferenceable load (s8) from %ir.incoming1, align 32, addrspace 5)
   ; CHECK-NEXT:   $vgpr0 = COPY [[C]](s32)
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY19]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY12]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY13]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY14]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY16]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY17]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY18]](s32)
+  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @void_func_byval_a3i32_byval_i8_align32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 36, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY10]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY20]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @void_func_byval_a3i32_byval_i8_align32([3 x i32] addrspace(5)* byval([3 x i32]) %incoming0, i8 addrspace(5)* align 32 %incoming1, i32 999)
   ret void
 }
@@ -4072,7 +4052,7 @@ declare void @void_func_byval_a4i64_align4([4 x i64] addrspace(5)* byval([4 x i6
 define void @call_byval_a4i64_align4_higher_source_align([4 x i64] addrspace(5)* align 256 %incoming_high_align) #0 {
   ; CHECK-LABEL: name: call_byval_a4i64_align4_higher_source_align
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -4083,36 +4063,34 @@ define void @call_byval_a4i64_align4_higher_source_align([4 x i64] addrspace(5)*
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p5) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @void_func_byval_a4i64_align4
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sgpr32
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[AMDGPU_WAVE_ADDRESS]], [[C]](s32)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
   ; CHECK-NEXT:   G_MEMCPY [[PTR_ADD]](p5), [[COPY8]](p5), [[C1]](s32), 0 :: (dereferenceable store (s256) into stack, align 4, addrspace 5), (dereferenceable load (s256) from %ir.incoming_high_align, align 256, addrspace 5)
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
+  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @void_func_byval_a4i64_align4, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 32, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY9]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY19]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @void_func_byval_a4i64_align4([4 x i64] addrspace(5)* byval([4 x i64]) align 4 %incoming_high_align)
   ret void
 }
@@ -4610,7 +4588,7 @@ entry:
 define void @stack_12xv3i32() #0 {
   ; CHECK-LABEL: name: stack_12xv3i32
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -4620,7 +4598,6 @@ define void @stack_12xv3i32() #0 {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -4651,14 +4628,14 @@ define void @stack_12xv3i32() #0 {
   ; CHECK-NEXT:   [[BUILD_VECTOR11:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C13]](s32), [[C14]](s32), [[C15]](s32)
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_12xv3i32
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s32>)
   ; CHECK-NEXT:   [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<3 x s32>)
   ; CHECK-NEXT:   [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<3 x s32>)
@@ -4718,20 +4695,19 @@ define void @stack_12xv3i32() #0 {
   ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_12xv3i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 20, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY18]]
+  ; CHECK-NEXT:   SI_RETURN
 entry:
   call void @external_void_func_12xv3i32(
       <3 x i32> <i32 0, i32 0, i32 0>,
@@ -4752,7 +4728,7 @@ entry:
 define void @stack_12xv3f32() #0 {
   ; CHECK-LABEL: name: stack_12xv3f32
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -4762,7 +4738,6 @@ define void @stack_12xv3f32() #0 {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
@@ -4793,14 +4768,14 @@ define void @stack_12xv3f32() #0 {
   ; CHECK-NEXT:   [[BUILD_VECTOR11:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[C13]](s32), [[C14]](s32), [[C15]](s32)
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_12xv3f32
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s32>)
   ; CHECK-NEXT:   [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<3 x s32>)
   ; CHECK-NEXT:   [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<3 x s32>)
@@ -4860,20 +4835,19 @@ define void @stack_12xv3f32() #0 {
   ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_12xv3f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 20, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY18]]
+  ; CHECK-NEXT:   SI_RETURN
 entry:
   call void @external_void_func_12xv3f32(
       <3 x float> <float 0.0, float 0.0, float 0.0>,
@@ -4894,7 +4868,7 @@ entry:
 define void @stack_8xv5i32() #0 {
   ; CHECK-LABEL: name: stack_8xv5i32
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -4904,7 +4878,6 @@ define void @stack_8xv5i32() #0 {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -4931,14 +4904,14 @@ define void @stack_8xv5i32() #0 {
   ; CHECK-NEXT:   [[BUILD_VECTOR7:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C11]](s32), [[C12]](s32), [[C13]](s32), [[C14]](s32), [[C15]](s32)
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_8xv5i32
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<5 x s32>)
   ; CHECK-NEXT:   [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<5 x s32>)
   ; CHECK-NEXT:   [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<5 x s32>)
@@ -5006,20 +4979,19 @@ define void @stack_8xv5i32() #0 {
   ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_8xv5i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 36, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY18]]
+  ; CHECK-NEXT:   SI_RETURN
 entry:
   call void @external_void_func_8xv5i32(
       <5 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0>,
@@ -5036,7 +5008,7 @@ entry:
 define void @stack_8xv5f32() #0 {
   ; CHECK-LABEL: name: stack_8xv5f32
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -5046,7 +5018,6 @@ define void @stack_8xv5f32() #0 {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32), [[C]](s32)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
@@ -5073,14 +5044,14 @@ define void @stack_8xv5f32() #0 {
   ; CHECK-NEXT:   [[BUILD_VECTOR7:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[C11]](s32), [[C12]](s32), [[C13]](s32), [[C14]](s32), [[C15]](s32)
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @external_void_func_8xv5f32
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<5 x s32>)
   ; CHECK-NEXT:   [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32), [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<5 x s32>)
   ; CHECK-NEXT:   [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32), [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32), [[UV14:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR2]](<5 x s32>)
@@ -5148,20 +5119,19 @@ define void @stack_8xv5f32() #0 {
   ; CHECK-NEXT:   $vgpr28 = COPY [[UV28]](s32)
   ; CHECK-NEXT:   $vgpr29 = COPY [[UV29]](s32)
   ; CHECK-NEXT:   $vgpr30 = COPY [[UV30]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @external_void_func_8xv5f32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 36, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY18]]
+  ; CHECK-NEXT:   SI_RETURN
 entry:
   call void @external_void_func_8xv5f32(
       <5 x float> <float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>,

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constantexpr.ll
index 78ecae87d06f5..138e66ffd6c7a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constantexpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constantexpr.ll
@@ -6,21 +6,17 @@
 define i32 @test() {
   ; CHECK-LABEL: name: test
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
   ; CHECK-NEXT:   [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[C]](s32)
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @var
   ; CHECK-NEXT:   [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[INTTOPTR]](p0), [[GV]]
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY [[ZEXT]](s32)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY [[ZEXT]](s32)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY3]](s32)
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY4]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY3]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   ret i32 bitcast (<1 x i32> <i32 extractelement (<1 x i32> bitcast (i32 zext (i1 icmp eq (i32* @var, i32* inttoptr (i32 -1 to i32*)) to i32) to <1 x i32>), i64 0)> to i32)
 }
 
@@ -66,9 +62,6 @@ define amdgpu_kernel void @constantexpr_select_1() {
 define i32 @test_fcmp_constexpr() {
   ; CHECK-LABEL: name: test_fcmp_constexpr
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @a
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[GV]], [[C]](s64)
@@ -79,8 +72,7 @@ define i32 @test_fcmp_constexpr() {
   ; CHECK-NEXT:   [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(oeq), [[UITOFP]](s32), [[C1]]
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[FCMP]](s1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   ret i32 zext (i1 fcmp oeq (float uitofp (i1 icmp eq (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @a, i64 0, i64 1), i32* @var) to float), float 0.000000e+00) to i32)
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constrained-fp.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constrained-fp.ll
index 10218b9a88d07..e01d9c399e937 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constrained-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-constrained-fp.ll
@@ -4,15 +4,13 @@
 define float @v_constained_fadd_f32_fpexcept_strict(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_strict
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[STRICT_FADD:%[0-9]+]]:_(s32) = G_STRICT_FADD [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[STRICT_FADD]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %val
 }
@@ -20,15 +18,13 @@ define float @v_constained_fadd_f32_fpexcept_strict(float %x, float %y) #0 {
 define float @v_constained_fadd_f32_fpexcept_strict_flags(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_strict_flags
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[STRICT_FADD:%[0-9]+]]:_(s32) = nsz G_STRICT_FADD [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[STRICT_FADD]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call nsz float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %val
 }
@@ -36,15 +32,13 @@ define float @v_constained_fadd_f32_fpexcept_strict_flags(float %x, float %y) #0
 define float @v_constained_fadd_f32_fpexcept_ignore(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_ignore
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %3:_(s32) = nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   $vgpr0 = COPY %3(s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   %2:_(s32) = nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   $vgpr0 = COPY %2(s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret float %val
 }
@@ -52,15 +46,13 @@ define float @v_constained_fadd_f32_fpexcept_ignore(float %x, float %y) #0 {
 define float @v_constained_fadd_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_ignore_flags
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %3:_(s32) = nsz nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   $vgpr0 = COPY %3(s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   %2:_(s32) = nsz nofpexcept G_STRICT_FADD [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   $vgpr0 = COPY %2(s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call nsz float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret float %val
 }
@@ -68,15 +60,13 @@ define float @v_constained_fadd_f32_fpexcept_ignore_flags(float %x, float %y) #0
 define float @v_constained_fadd_f32_fpexcept_maytrap(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_f32_fpexcept_maytrap
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[STRICT_FADD:%[0-9]+]]:_(s32) = G_STRICT_FADD [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[STRICT_FADD]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
   ret float %val
 }
@@ -84,7 +74,7 @@ define float @v_constained_fadd_f32_fpexcept_maytrap(float %x, float %y) #0 {
 define <2 x float> @v_constained_fadd_v2f32_fpexcept_strict(<2 x float> %x, <2 x float> %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_strict
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -92,13 +82,11 @@ define <2 x float> @v_constained_fadd_v2f32_fpexcept_strict(<2 x float> %x, <2 x
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[STRICT_FADD:%[0-9]+]]:_(<2 x s32>) = G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[STRICT_FADD]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret <2 x float> %val
 }
@@ -106,7 +94,7 @@ define <2 x float> @v_constained_fadd_v2f32_fpexcept_strict(<2 x float> %x, <2 x
 define <2 x float> @v_constained_fadd_v2f32_fpexcept_ignore(<2 x float> %x, <2 x float> %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_ignore
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -114,13 +102,11 @@ define <2 x float> @v_constained_fadd_v2f32_fpexcept_ignore(<2 x float> %x, <2 x
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %7:_(<2 x s32>) = nofpexcept G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %7(<2 x s32>)
+  ; CHECK-NEXT:   %6:_(<2 x s32>) = nofpexcept G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES %6(<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret <2 x float> %val
 }
@@ -128,7 +114,7 @@ define <2 x float> @v_constained_fadd_v2f32_fpexcept_ignore(<2 x float> %x, <2 x
 define <2 x float> @v_constained_fadd_v2f32_fpexcept_maytrap(<2 x float> %x, <2 x float> %y) #0 {
   ; CHECK-LABEL: name: v_constained_fadd_v2f32_fpexcept_maytrap
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -136,13 +122,11 @@ define <2 x float> @v_constained_fadd_v2f32_fpexcept_maytrap(<2 x float> %x, <2
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[STRICT_FADD:%[0-9]+]]:_(<2 x s32>) = G_STRICT_FADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[STRICT_FADD]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
   ret <2 x float> %val
 }
@@ -150,15 +134,13 @@ define <2 x float> @v_constained_fadd_v2f32_fpexcept_maytrap(<2 x float> %x, <2
 define float @v_constained_fsub_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fsub_f32_fpexcept_ignore_flags
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %3:_(s32) = nsz nofpexcept G_STRICT_FSUB [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   $vgpr0 = COPY %3(s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   %2:_(s32) = nsz nofpexcept G_STRICT_FSUB [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   $vgpr0 = COPY %2(s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call nsz float @llvm.experimental.constrained.fsub.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret float %val
 }
@@ -166,15 +148,13 @@ define float @v_constained_fsub_f32_fpexcept_ignore_flags(float %x, float %y) #0
 define float @v_constained_fmul_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fmul_f32_fpexcept_ignore_flags
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %3:_(s32) = nsz nofpexcept G_STRICT_FMUL [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   $vgpr0 = COPY %3(s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   %2:_(s32) = nsz nofpexcept G_STRICT_FMUL [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   $vgpr0 = COPY %2(s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call nsz float @llvm.experimental.constrained.fmul.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret float %val
 }
@@ -182,15 +162,13 @@ define float @v_constained_fmul_f32_fpexcept_ignore_flags(float %x, float %y) #0
 define float @v_constained_fdiv_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_fdiv_f32_fpexcept_ignore_flags
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %3:_(s32) = nsz nofpexcept G_STRICT_FDIV [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   $vgpr0 = COPY %3(s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   %2:_(s32) = nsz nofpexcept G_STRICT_FDIV [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   $vgpr0 = COPY %2(s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call nsz float @llvm.experimental.constrained.fdiv.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret float %val
 }
@@ -198,15 +176,13 @@ define float @v_constained_fdiv_f32_fpexcept_ignore_flags(float %x, float %y) #0
 define float @v_constained_frem_f32_fpexcept_ignore_flags(float %x, float %y) #0 {
   ; CHECK-LABEL: name: v_constained_frem_f32_fpexcept_ignore_flags
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %3:_(s32) = nsz nofpexcept G_STRICT_FREM [[COPY]], [[COPY1]]
-  ; CHECK-NEXT:   $vgpr0 = COPY %3(s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   %2:_(s32) = nsz nofpexcept G_STRICT_FREM [[COPY]], [[COPY1]]
+  ; CHECK-NEXT:   $vgpr0 = COPY %2(s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call nsz float @llvm.experimental.constrained.frem.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret float %val
 }
@@ -214,16 +190,14 @@ define float @v_constained_frem_f32_fpexcept_ignore_flags(float %x, float %y) #0
 define float @v_constained_fma_f32_fpexcept_ignore_flags(float %x, float %y, float %z) #0 {
   ; CHECK-LABEL: name: v_constained_fma_f32_fpexcept_ignore_flags
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   %4:_(s32) = nsz nofpexcept G_STRICT_FMA [[COPY]], [[COPY1]], [[COPY2]]
-  ; CHECK-NEXT:   $vgpr0 = COPY %4(s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; CHECK-NEXT:   %3:_(s32) = nsz nofpexcept G_STRICT_FMA [[COPY]], [[COPY1]], [[COPY2]]
+  ; CHECK-NEXT:   $vgpr0 = COPY %3(s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call nsz float @llvm.experimental.constrained.fma.f32(float %x, float %y, float %z, metadata !"round.tonearest", metadata !"fpexcept.ignore")
   ret float %val
 }
@@ -231,14 +205,12 @@ define float @v_constained_fma_f32_fpexcept_ignore_flags(float %x, float %y, flo
 define float @v_constained_sqrt_f32_fpexcept_strict(float %x) #0 {
   ; CHECK-LABEL: name: v_constained_sqrt_f32_fpexcept_strict
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[STRICT_FSQRT:%[0-9]+]]:_(s32) = G_STRICT_FSQRT [[COPY]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[STRICT_FSQRT]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %val = call float @llvm.experimental.constrained.sqrt.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict")
   ret float %val
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fixed-function-abi-vgpr-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fixed-function-abi-vgpr-args.ll
index ceba50dc1b7b4..af0dd868c9092 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fixed-function-abi-vgpr-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-fixed-function-abi-vgpr-args.ll
@@ -7,7 +7,7 @@
 define void @void_a31i32_i32([31 x i32] %arg0, i32 %arg1) {
   ; FIXED-LABEL: name: void_a31i32_i32
   ; FIXED: bb.1 (%ir-block.0):
-  ; FIXED-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; FIXED-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; FIXED-NEXT: {{  $}}
   ; FIXED-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; FIXED-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -42,11 +42,9 @@ define void @void_a31i32_i32([31 x i32] %arg0, i32 %arg1) {
   ; FIXED-NEXT:   [[COPY30:%[0-9]+]]:_(s32) = COPY $vgpr30
   ; FIXED-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; FIXED-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.0, align 16, addrspace 5)
-  ; FIXED-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; FIXED-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; FIXED-NEXT:   G_STORE [[LOAD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; FIXED-NEXT:   [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; FIXED-NEXT:   S_SETPC_B64_return [[COPY32]]
+  ; FIXED-NEXT:   SI_RETURN
   store i32 %arg1, i32 addrspace(1)* undef
   ret void
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll
index a5bc34a9fac78..8adc0ac7009b6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-function-args.ll
@@ -8,14 +8,12 @@
 define void @void_func_empty_arg({} %arg0, i32 %arg1) #0 {
   ; CHECK-LABEL: name: void_func_empty_arg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i32 %arg1, i32 addrspace(1)* undef
   ret void
 }
@@ -23,14 +21,12 @@ define void @void_func_empty_arg({} %arg0, i32 %arg1) #0 {
 define void @void_func_empty_array([0 x i8] %arg0, i32 %arg1) #0 {
   ; CHECK-LABEL: name: void_func_empty_array
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i32 %arg1, i32 addrspace(1)* undef
   ret void
 }
@@ -38,15 +34,13 @@ define void @void_func_empty_array([0 x i8] %arg0, i32 %arg1) #0 {
 define void @void_func_i1(i1 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i1
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s1), [[DEF]](p1) :: (store (s1) into `i1 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i1 %arg0, i1 addrspace(1)* undef
   ret void
 }
@@ -54,19 +48,17 @@ define void @void_func_i1(i1 %arg0) #0 {
 define void @void_func_i1_zeroext(i1 zeroext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i1_zeroext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 1
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s1)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = zext i1 %arg0 to i32
   %add = add i32 %ext, 12
   store i32 %add, i32 addrspace(1)* undef
@@ -76,19 +68,17 @@ define void @void_func_i1_zeroext(i1 zeroext %arg0) #0 {
 define void @void_func_i1_signext(i1 signext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i1_signext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 1
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ASSERT_SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s1)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = sext i1 %arg0 to i32
   %add = add i32 %ext, 12
   store i32 %add, i32 addrspace(1)* undef
@@ -99,11 +89,10 @@ define void @i1_arg_i1_use(i1 %arg) #0 {
   ; CHECK-LABEL: name: i1_arg_i1_use
   ; CHECK: bb.1.bb:
   ; CHECK-NEXT:   successors: %bb.2(0x40000000), %bb.3(0x40000000)
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
@@ -120,8 +109,7 @@ define void @i1_arg_i1_use(i1 %arg) #0 {
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT: bb.3.bb2:
   ; CHECK-NEXT:   G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[INT1]](s64)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
 bb:
   br i1 %arg, label %bb2, label %bb1
 
@@ -136,16 +124,14 @@ bb2:
 define void @void_func_i8(i8 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC]](s16)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC1]](s8), [[DEF]](p1) :: (store (s8) into `i8 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i8 %arg0, i8 addrspace(1)* undef
   ret void
 }
@@ -153,19 +139,17 @@ define void @void_func_i8(i8 %arg0) #0 {
 define void @void_func_i8_zeroext(i8 zeroext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i8_zeroext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 8
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s8)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = zext i8 %arg0 to i32
   %add = add i32 %ext, 12
   store i32 %add, i32 addrspace(1)* undef
@@ -175,19 +159,17 @@ define void @void_func_i8_zeroext(i8 zeroext %arg0) #0 {
 define void @void_func_i8_signext(i8 signext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i8_signext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 8
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[ASSERT_SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s8)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = sext i8 %arg0 to i32
   %add = add i32 %ext, 12
   store i32 %add, i32 addrspace(1)* undef
@@ -197,15 +179,13 @@ define void @void_func_i8_signext(i8 signext %arg0) #0 {
 define void @void_func_i16(i16 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s16), [[DEF]](p1) :: (store (s16) into `i16 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i16 %arg0, i16 addrspace(1)* undef
   ret void
 }
@@ -213,19 +193,17 @@ define void @void_func_i16(i16 %arg0) #0 {
 define void @void_func_i16_zeroext(i16 zeroext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i16_zeroext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 16
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC]](s16)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ZEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = zext i16 %arg0 to i32
   %add = add i32 %ext, 12
   store i32 %add, i32 addrspace(1)* undef
@@ -235,19 +213,17 @@ define void @void_func_i16_zeroext(i16 zeroext %arg0) #0 {
 define void @void_func_i16_signext(i16 signext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i16_signext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 16
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASSERT_SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[TRUNC]](s16)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = sext i16 %arg0 to i32
   %add = add i32 %ext, 12
   store i32 %add, i32 addrspace(1)* undef
@@ -257,15 +233,13 @@ define void @void_func_i16_signext(i16 signext %arg0) #0 {
 define void @void_func_i24(i24 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i24
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s24) = G_TRUNC [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s24), [[DEF]](p1) :: (store (s24) into `i24 addrspace(1)* undef`, align 4, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i24 %arg0, i24 addrspace(1)* undef
   ret void
 }
@@ -273,16 +247,14 @@ define void @void_func_i24(i24 %arg0) #0 {
 define void @void_func_i24_zeroext(i24 zeroext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i24_zeroext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_ZEXT:%[0-9]+]]:_(s32) = G_ASSERT_ZEXT [[COPY]], 24
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s24) = G_TRUNC [[ASSERT_ZEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s24), [[DEF]](p1) :: (store (s24) into `i24 addrspace(1)* undef`, align 4, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i24 %arg0, i24 addrspace(1)* undef
   ret void
 }
@@ -290,16 +262,14 @@ define void @void_func_i24_zeroext(i24 zeroext %arg0) #0 {
 define void @void_func_i24_signext(i24 signext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i24_signext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 24
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s24) = G_TRUNC [[ASSERT_SEXT]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s24), [[DEF]](p1) :: (store (s24) into `i24 addrspace(1)* undef`, align 4, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i24 %arg0, i24 addrspace(1)* undef
   ret void
 }
@@ -307,14 +277,12 @@ define void @void_func_i24_signext(i24 signext %arg0) #0 {
 define void @void_func_i32(i32 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i32 %arg0, i32 addrspace(1)* undef
   ret void
 }
@@ -323,14 +291,12 @@ define void @void_func_i32(i32 %arg0) #0 {
 define void @void_func_i32_signext(i32 signext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i32_signext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i32 %arg0, i32 addrspace(1)* undef
   ret void
 }
@@ -339,14 +305,12 @@ define void @void_func_i32_signext(i32 signext %arg0) #0 {
 define void @void_func_i32_zeroext(i32 zeroext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i32_zeroext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i32 %arg0, i32 addrspace(1)* undef
   ret void
 }
@@ -354,14 +318,12 @@ define void @void_func_i32_zeroext(i32 zeroext %arg0) #0 {
 define void @void_func_p3i8(i8 addrspace(3)* %arg0) #0 {
   ; CHECK-LABEL: name: void_func_p3i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](p3), [[DEF]](p1) :: (store (p3) into `i8 addrspace(3)* addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store i8 addrspace(3)* %arg0, i8 addrspace(3)* addrspace(1)* undef
   ret void
 }
@@ -369,17 +331,15 @@ define void @void_func_p3i8(i8 addrspace(3)* %arg0) #0 {
 define void @void_func_i48(i48 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i48
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s48) = G_TRUNC [[MV]](s64)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s48), [[DEF]](p1) :: (store (s48) into `i48 addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store i48 %arg0, i48 addrspace(1)* undef
   ret void
 }
@@ -387,20 +347,18 @@ define void @void_func_i48(i48 %arg0) #0 {
 define void @void_func_i48_zeroext(i48 zeroext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i48_zeroext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s48) = G_TRUNC [[MV]](s64)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[TRUNC]](s48)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[ZEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s64), [[DEF]](p1) :: (store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = zext i48 %arg0 to i64
   %add = add i64 %ext, 12
   store i64 %add, i64 addrspace(1)* undef
@@ -410,20 +368,18 @@ define void @void_func_i48_zeroext(i48 zeroext %arg0) #0 {
 define void @void_func_i48_signext(i48 signext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i48_signext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s48) = G_TRUNC [[MV]](s64)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s48)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s64) = G_ADD [[SEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s64), [[DEF]](p1) :: (store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = sext i48 %arg0 to i64
   %add = add i64 %ext, 12
   store i64 %add, i64 addrspace(1)* undef
@@ -433,16 +389,14 @@ define void @void_func_i48_signext(i48 signext %arg0) #0 {
 define void @void_func_i64(i64 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[MV]](s64), [[DEF]](p1) :: (store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store i64 %arg0, i64 addrspace(1)* undef
   ret void
 }
@@ -450,18 +404,16 @@ define void @void_func_i64(i64 %arg0) #0 {
 define void @void_func_i95(i95 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i95
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s95) = G_TRUNC [[MV]](s96)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s95), [[DEF]](p1) :: (store (s95) into `i95 addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   store i95 %arg0, i95 addrspace(1)* undef
   ret void
 }
@@ -469,21 +421,19 @@ define void @void_func_i95(i95 %arg0) #0 {
 define void @void_func_i95_zeroext(i95 zeroext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i95_zeroext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s95) = G_TRUNC [[MV]](s96)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s96) = G_CONSTANT i96 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s96) = G_ZEXT [[TRUNC]](s95)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s96) = G_ADD [[ZEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s96), [[DEF]](p1) :: (store (s96) into `i96 addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = zext i95 %arg0 to i96
   %add = add i96 %ext, 12
   store i96 %add, i96 addrspace(1)* undef
@@ -493,21 +443,19 @@ define void @void_func_i95_zeroext(i95 zeroext %arg0) #0 {
 define void @void_func_i95_signext(i95 signext %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i95_signext
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s95) = G_TRUNC [[MV]](s96)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s96) = G_CONSTANT i96 12
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(s96) = G_SEXT [[TRUNC]](s95)
   ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s96) = G_ADD [[SEXT]], [[C]]
   ; CHECK-NEXT:   G_STORE [[ADD]](s96), [[DEF]](p1) :: (store (s96) into `i96 addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   %ext = sext i95 %arg0 to i96
   %add = add i96 %ext, 12
   store i96 %add, i96 addrspace(1)* undef
@@ -517,17 +465,15 @@ define void @void_func_i95_signext(i95 signext %arg0) #0 {
 define void @void_func_i96(i96 %arg0) #0 {
   ; CHECK-LABEL: name: void_func_i96
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s96) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[MV]](s96), [[DEF]](p1) :: (store (s96) into `i96 addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   store i96 %arg0, i96 addrspace(1)* undef
   ret void
 }
@@ -535,16 +481,14 @@ define void @void_func_i96(i96 %arg0) #0 {
 define void @void_func_p0i8(i8* %arg0) #0 {
   ; CHECK-LABEL: name: void_func_p0i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[MV]](p0), [[DEF]](p1) :: (store (p0) into `i8* addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store i8* %arg0, i8* addrspace(1)* undef
   ret void
 }
@@ -552,16 +496,14 @@ define void @void_func_p0i8(i8* %arg0) #0 {
 define void @void_func_p1i8(i8 addrspace(1)* %arg0) #0 {
   ; CHECK-LABEL: name: void_func_p1i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[MV]](p1), [[DEF]](p1) :: (store (p1) into `i8 addrspace(1)* addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store i8 addrspace(1)* %arg0, i8 addrspace(1)* addrspace(1)* undef
   ret void
 }
@@ -569,15 +511,13 @@ define void @void_func_p1i8(i8 addrspace(1)* %arg0) #0 {
 define void @void_func_f16(half %arg0) #0 {
   ; CHECK-LABEL: name: void_func_f16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](s16), [[DEF]](p1) :: (store (s16) into `half addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store half %arg0, half addrspace(1)* undef
   ret void
 }
@@ -585,14 +525,12 @@ define void @void_func_f16(half %arg0) #0 {
 define void @void_func_f32(float %arg0) #0 {
   ; CHECK-LABEL: name: void_func_f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (store (s32) into `float addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store float %arg0, float addrspace(1)* undef
   ret void
 }
@@ -600,16 +538,14 @@ define void @void_func_f32(float %arg0) #0 {
 define void @void_func_f64(double %arg0) #0 {
   ; CHECK-LABEL: name: void_func_f64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[MV]](s64), [[DEF]](p1) :: (store (s64) into `double addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store double %arg0, double addrspace(1)* undef
   ret void
 }
@@ -617,16 +553,14 @@ define void @void_func_f64(double %arg0) #0 {
 define void @void_func_v2i32(<2 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x s32>), [[DEF]](p1) :: (store (<2 x s32>) into `<2 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i32> %arg0, <2 x i32> addrspace(1)* undef
   ret void
 }
@@ -634,17 +568,15 @@ define void @void_func_v2i32(<2 x i32> %arg0) #0 {
 define void @void_func_v2i24(<2 x i24> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2i24
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(<2 x s24>) = G_TRUNC [[BUILD_VECTOR]](<2 x s32>)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](<2 x s24>), [[DEF]](p1) :: (store (<2 x s24>) into `<2 x i24> addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i24> %arg0, <2 x i24> addrspace(1)* undef
   ret void
 }
@@ -652,18 +584,16 @@ define void @void_func_v2i24(<2 x i24> %arg0) #0 {
 define void @void_func_v3i24(<3 x i24> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3i24
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(<3 x s24>) = G_TRUNC [[BUILD_VECTOR]](<3 x s32>)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC]](<3 x s24>), [[DEF]](p1) :: (store (<3 x s24>) into `<3 x i24> addrspace(1)* undef`, align 16, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x i24> %arg0, <3 x i24> addrspace(1)* undef
   ret void
 }
@@ -671,7 +601,7 @@ define void @void_func_v3i24(<3 x i24> %arg0) #0 {
 define void @void_func_v2i8(<2 x i8> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -679,11 +609,9 @@ define void @void_func_v2i8(<2 x i8> %arg0) #0 {
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
   ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(<2 x s8>) = G_TRUNC [[BUILD_VECTOR]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC2]](<2 x s8>), [[DEF]](p1) :: (store (<2 x s8>) into `<2 x i8> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i8> %arg0, <2 x i8> addrspace(1)* undef
   ret void
 }
@@ -691,7 +619,7 @@ define void @void_func_v2i8(<2 x i8> %arg0) #0 {
 define void @void_func_v3i8(<3 x i8> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -701,11 +629,9 @@ define void @void_func_v3i8(<3 x i8> %arg0) #0 {
   ; CHECK-NEXT:   [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16)
   ; CHECK-NEXT:   [[TRUNC3:%[0-9]+]]:_(<3 x s8>) = G_TRUNC [[BUILD_VECTOR]](<3 x s16>)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC3]](<3 x s8>), [[DEF]](p1) :: (store (<3 x s8>) into `<3 x i8> addrspace(1)* undef`, align 4, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x i8> %arg0, <3 x i8> addrspace(1)* undef
   ret void
 }
@@ -713,7 +639,7 @@ define void @void_func_v3i8(<3 x i8> %arg0) #0 {
 define void @void_func_v4i8(<4 x i8> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v4i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -725,11 +651,9 @@ define void @void_func_v4i8(<4 x i8> %arg0) #0 {
   ; CHECK-NEXT:   [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[COPY3]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[TRUNC3]](s16)
   ; CHECK-NEXT:   [[TRUNC4:%[0-9]+]]:_(<4 x s8>) = G_TRUNC [[BUILD_VECTOR]](<4 x s16>)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC4]](<4 x s8>), [[DEF]](p1) :: (store (<4 x s8>) into `<4 x i8> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <4 x i8> %arg0, <4 x i8> addrspace(1)* undef
   ret void
 }
@@ -737,16 +661,14 @@ define void @void_func_v4i8(<4 x i8> %arg0) #0 {
 define void @void_func_v2p3i8(<2 x i8 addrspace(3)*> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2p3i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[COPY]](p3), [[COPY1]](p3)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x p3>), [[DEF]](p1) :: (store (<2 x p3>) into `<2 x i8 addrspace(3)*> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i8 addrspace(3)*> %arg0, <2 x i8 addrspace(3)*> addrspace(1)* undef
   ret void
 }
@@ -754,17 +676,15 @@ define void @void_func_v2p3i8(<2 x i8 addrspace(3)*> %arg0) #0 {
 define void @void_func_v3i32(<3 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<3 x s32>), [[DEF]](p1) :: (store (<3 x s32>) into `<3 x i32> addrspace(1)* undef`, align 16, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x i32> %arg0, <3 x i32> addrspace(1)* undef
   ret void
 }
@@ -772,18 +692,16 @@ define void @void_func_v3i32(<3 x i32> %arg0) #0 {
 define void @void_func_v4i32(<4 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v4i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<4 x s32>), [[DEF]](p1) :: (store (<4 x s32>) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <4 x i32> %arg0, <4 x i32> addrspace(1)* undef
   ret void
 }
@@ -791,7 +709,7 @@ define void @void_func_v4i32(<4 x i32> %arg0) #0 {
 define void @void_func_v5i32(<5 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v5i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -799,11 +717,9 @@ define void @void_func_v5i32(<5 x i32> %arg0) #0 {
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<5 x s32>), [[DEF]](p1) :: (store (<5 x s32>) into `<5 x i32> addrspace(1)* undef`, align 32, addrspace 1)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY5]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY6]]
+  ; CHECK-NEXT:   SI_RETURN
   store <5 x i32> %arg0, <5 x i32> addrspace(1)* undef
   ret void
 }
@@ -811,7 +727,7 @@ define void @void_func_v5i32(<5 x i32> %arg0) #0 {
 define void @void_func_v8i32(<8 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v8i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -822,11 +738,9 @@ define void @void_func_v8i32(<8 x i32> %arg0) #0 {
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<8 x s32>), [[DEF]](p1) :: (store (<8 x s32>) into `<8 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]]
+  ; CHECK-NEXT:   SI_RETURN
   store <8 x i32> %arg0, <8 x i32> addrspace(1)* undef
   ret void
 }
@@ -834,7 +748,7 @@ define void @void_func_v8i32(<8 x i32> %arg0) #0 {
 define void @void_func_v16i32(<16 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v16i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -853,11 +767,9 @@ define void @void_func_v16i32(<16 x i32> %arg0) #0 {
   ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
   ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<16 x s32>), [[DEF]](p1) :: (store (<16 x s32>) into `<16 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY17]]
+  ; CHECK-NEXT:   SI_RETURN
   store <16 x i32> %arg0, <16 x i32> addrspace(1)* undef
   ret void
 }
@@ -865,7 +777,7 @@ define void @void_func_v16i32(<16 x i32> %arg0) #0 {
 define void @void_func_v32i32(<32 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v32i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -901,11 +813,9 @@ define void @void_func_v32i32(<32 x i32> %arg0) #0 {
   ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.0, align 16, addrspace 5)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<32 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[LOAD]](s32)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY32]]
+  ; CHECK-NEXT:   SI_RETURN
   store <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   ret void
 }
@@ -914,7 +824,7 @@ define void @void_func_v32i32(<32 x i32> %arg0) #0 {
 define void @void_func_v33i32(<33 x i32> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v33i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -952,11 +862,9 @@ define void @void_func_v33i32(<33 x i32> %arg0) #0 {
   ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.0, addrspace 5)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<33 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32), [[COPY16]](s32), [[COPY17]](s32), [[COPY18]](s32), [[COPY19]](s32), [[COPY20]](s32), [[COPY21]](s32), [[COPY22]](s32), [[COPY23]](s32), [[COPY24]](s32), [[COPY25]](s32), [[COPY26]](s32), [[COPY27]](s32), [[COPY28]](s32), [[COPY29]](s32), [[COPY30]](s32), [[LOAD]](s32), [[LOAD1]](s32)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<33 x s32>), [[DEF]](p1) :: (store (<33 x s32>) into `<33 x i32> addrspace(1)* undef`, align 256, addrspace 1)
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY32]]
+  ; CHECK-NEXT:   SI_RETURN
   store <33 x i32> %arg0, <33 x i32> addrspace(1)* undef
   ret void
 }
@@ -964,7 +872,7 @@ define void @void_func_v33i32(<33 x i32> %arg0) #0 {
 define void @void_func_v2i64(<2 x i64> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -973,11 +881,9 @@ define void @void_func_v2i64(<2 x i64> %arg0) #0 {
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `<2 x i64> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i64> %arg0, <2 x i64> addrspace(1)* undef
   ret void
 }
@@ -985,7 +891,7 @@ define void @void_func_v2i64(<2 x i64> %arg0) #0 {
 define void @void_func_v2p0i8(<2 x i8*> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2p0i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -994,11 +900,9 @@ define void @void_func_v2p0i8(<2 x i8*> %arg0) #0 {
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p0>) = G_BUILD_VECTOR [[MV]](p0), [[MV1]](p0)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x p0>), [[DEF]](p1) :: (store (<2 x p0>) into `<2 x i8*> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i8*> %arg0, <2 x i8*> addrspace(1)* undef
   ret void
 }
@@ -1006,7 +910,7 @@ define void @void_func_v2p0i8(<2 x i8*> %arg0) #0 {
 define void @void_func_v2p1i8(<2 x i8 addrspace(1)*> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2p1i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1015,11 +919,9 @@ define void @void_func_v2p1i8(<2 x i8 addrspace(1)*> %arg0) #0 {
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p1>) = G_BUILD_VECTOR [[MV]](p1), [[MV1]](p1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `<2 x i8 addrspace(1)*> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i8 addrspace(1)*> %arg0, <2 x i8 addrspace(1)*> addrspace(1)* undef
   ret void
 }
@@ -1027,7 +929,7 @@ define void @void_func_v2p1i8(<2 x i8 addrspace(1)*> %arg0) #0 {
 define void @void_func_v3i64(<3 x i64> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1039,11 +941,9 @@ define void @void_func_v3i64(<3 x i64> %arg0) #0 {
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<3 x s64>), [[DEF]](p1) :: (store (<3 x s64>) into `<3 x i64> addrspace(1)* undef`, align 32, addrspace 1)
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY7]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x i64> %arg0, <3 x i64> addrspace(1)* undef
   ret void
 }
@@ -1051,7 +951,7 @@ define void @void_func_v3i64(<3 x i64> %arg0) #0 {
 define void @void_func_v4i64(<4 x i64> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v4i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1066,11 +966,9 @@ define void @void_func_v4i64(<4 x i64> %arg0) #0 {
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
   ; CHECK-NEXT:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<4 x s64>), [[DEF]](p1) :: (store (<4 x s64>) into `<4 x i64> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]]
+  ; CHECK-NEXT:   SI_RETURN
   store <4 x i64> %arg0, <4 x i64> addrspace(1)* undef
   ret void
 }
@@ -1078,7 +976,7 @@ define void @void_func_v4i64(<4 x i64> %arg0) #0 {
 define void @void_func_v5i64(<5 x i64> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v5i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1096,11 +994,9 @@ define void @void_func_v5i64(<5 x i64> %arg0) #0 {
   ; CHECK-NEXT:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
   ; CHECK-NEXT:   [[MV4:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64)
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<5 x s64>), [[DEF]](p1) :: (store (<5 x s64>) into `<5 x i64> addrspace(1)* undef`, align 64, addrspace 1)
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY10]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY11]]
+  ; CHECK-NEXT:   SI_RETURN
   store <5 x i64> %arg0, <5 x i64> addrspace(1)* undef
   ret void
 }
@@ -1108,7 +1004,7 @@ define void @void_func_v5i64(<5 x i64> %arg0) #0 {
 define void @void_func_v8i64(<8 x i64> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v8i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1135,11 +1031,9 @@ define void @void_func_v8i64(<8 x i64> %arg0) #0 {
   ; CHECK-NEXT:   [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY12]](s32), [[COPY13]](s32)
   ; CHECK-NEXT:   [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64)
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<8 x s64>), [[DEF]](p1) :: (store (<8 x s64>) into `<8 x i64> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY17]]
+  ; CHECK-NEXT:   SI_RETURN
   store <8 x i64> %arg0, <8 x i64> addrspace(1)* undef
   ret void
 }
@@ -1147,7 +1041,7 @@ define void @void_func_v8i64(<8 x i64> %arg0) #0 {
 define void @void_func_v16i64(<16 x i64> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v16i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1199,11 +1093,9 @@ define void @void_func_v16i64(<16 x i64> %arg0) #0 {
   ; CHECK-NEXT:   [[MV14:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY28]](s32), [[COPY29]](s32)
   ; CHECK-NEXT:   [[MV15:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY30]](s32), [[LOAD]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64), [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64), [[MV12]](s64), [[MV13]](s64), [[MV14]](s64), [[MV15]](s64)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<16 x s64>), [[DEF]](p1) :: (store (<16 x s64>) into `<16 x i64> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY32]]
+  ; CHECK-NEXT:   SI_RETURN
   store <16 x i64> %arg0, <16 x i64> addrspace(1)* undef
   ret void
 }
@@ -1211,14 +1103,12 @@ define void @void_func_v16i64(<16 x i64> %arg0) #0 {
 define void @void_func_v2i16(<2 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](<2 x s16>), [[DEF]](p1) :: (store (<2 x s16>) into `<2 x i16> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x i16> %arg0, <2 x i16> addrspace(1)* undef
   ret void
 }
@@ -1226,18 +1116,16 @@ define void @void_func_v2i16(<2 x i16> %arg0) #0 {
 define void @void_func_v3i16(<3 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<3 x s16>), [[DEF]](p1) :: (store (<3 x s16>) into `<3 x i16> addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x i16> %arg0, <3 x i16> addrspace(1)* undef
   ret void
 }
@@ -1245,16 +1133,14 @@ define void @void_func_v3i16(<3 x i16> %arg0) #0 {
 define void @void_func_v4i16(<4 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v4i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<4 x s16>), [[DEF]](p1) :: (store (<4 x s16>) into `<4 x i16> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <4 x i16> %arg0, <4 x i16> addrspace(1)* undef
   ret void
 }
@@ -1262,7 +1148,7 @@ define void @void_func_v4i16(<4 x i16> %arg0) #0 {
 define void @void_func_v5i16(<5 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v5i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1270,11 +1156,9 @@ define void @void_func_v5i16(<5 x i16> %arg0) #0 {
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>), [[COPY2]](<2 x s16>)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<6 x s16>)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<5 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16), [[UV3]](s16), [[UV4]](s16)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<5 x s16>), [[DEF]](p1) :: (store (<5 x s16>) into `<5 x i16> addrspace(1)* undef`, align 16, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   store <5 x i16> %arg0, <5 x i16> addrspace(1)* undef
   ret void
 }
@@ -1282,18 +1166,16 @@ define void @void_func_v5i16(<5 x i16> %arg0) #0 {
 define void @void_func_v8i16(<8 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v8i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr3
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>), [[COPY2]](<2 x s16>), [[COPY3]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `<8 x i16> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <8 x i16> %arg0, <8 x i16> addrspace(1)* undef
   ret void
 }
@@ -1301,7 +1183,7 @@ define void @void_func_v8i16(<8 x i16> %arg0) #0 {
 define void @void_func_v16i16(<16 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v16i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1312,11 +1194,9 @@ define void @void_func_v16i16(<16 x i16> %arg0) #0 {
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr7
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>), [[COPY2]](<2 x s16>), [[COPY3]](<2 x s16>), [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>), [[COPY6]](<2 x s16>), [[COPY7]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<16 x s16>), [[DEF]](p1) :: (store (<16 x s16>) into `<16 x i16> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]]
+  ; CHECK-NEXT:   SI_RETURN
   store <16 x i16> %arg0, <16 x i16> addrspace(1)* undef
   ret void
 }
@@ -1326,7 +1206,7 @@ define void @void_func_v16i16(<16 x i16> %arg0) #0 {
 define void @void_func_v65i16(<65 x i16> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v65i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1366,11 +1246,9 @@ define void @void_func_v65i16(<65 x i16> %arg0) #0 {
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<66 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>), [[COPY2]](<2 x s16>), [[COPY3]](<2 x s16>), [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>), [[COPY6]](<2 x s16>), [[COPY7]](<2 x s16>), [[COPY8]](<2 x s16>), [[COPY9]](<2 x s16>), [[COPY10]](<2 x s16>), [[COPY11]](<2 x s16>), [[COPY12]](<2 x s16>), [[COPY13]](<2 x s16>), [[COPY14]](<2 x s16>), [[COPY15]](<2 x s16>), [[COPY16]](<2 x s16>), [[COPY17]](<2 x s16>), [[COPY18]](<2 x s16>), [[COPY19]](<2 x s16>), [[COPY20]](<2 x s16>), [[COPY21]](<2 x s16>), [[COPY22]](<2 x s16>), [[COPY23]](<2 x s16>), [[COPY24]](<2 x s16>), [[COPY25]](<2 x s16>), [[COPY26]](<2 x s16>), [[COPY27]](<2 x s16>), [[COPY28]](<2 x s16>), [[COPY29]](<2 x s16>), [[COPY30]](<2 x s16>), [[LOAD]](<2 x s16>), [[LOAD1]](<2 x s16>)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16), [[UV16:%[0-9]+]]:_(s16), [[UV17:%[0-9]+]]:_(s16), [[UV18:%[0-9]+]]:_(s16), [[UV19:%[0-9]+]]:_(s16), [[UV20:%[0-9]+]]:_(s16), [[UV21:%[0-9]+]]:_(s16), [[UV22:%[0-9]+]]:_(s16), [[UV23:%[0-9]+]]:_(s16), [[UV24:%[0-9]+]]:_(s16), [[UV25:%[0-9]+]]:_(s16), [[UV26:%[0-9]+]]:_(s16), [[UV27:%[0-9]+]]:_(s16), [[UV28:%[0-9]+]]:_(s16), [[UV29:%[0-9]+]]:_(s16), [[UV30:%[0-9]+]]:_(s16), [[UV31:%[0-9]+]]:_(s16), [[UV32:%[0-9]+]]:_(s16), [[UV33:%[0-9]+]]:_(s16), [[UV34:%[0-9]+]]:_(s16), [[UV35:%[0-9]+]]:_(s16), [[UV36:%[0-9]+]]:_(s16), [[UV37:%[0-9]+]]:_(s16), [[UV38:%[0-9]+]]:_(s16), [[UV39:%[0-9]+]]:_(s16), [[UV40:%[0-9]+]]:_(s16), [[UV41:%[0-9]+]]:_(s16), [[UV42:%[0-9]+]]:_(s16), [[UV43:%[0-9]+]]:_(s16), [[UV44:%[0-9]+]]:_(s16), [[UV45:%[0-9]+]]:_(s16), [[UV46:%[0-9]+]]:_(s16), [[UV47:%[0-9]+]]:_(s16), [[UV48:%[0-9]+]]:_(s16), [[UV49:%[0-9]+]]:_(s16), [[UV50:%[0-9]+]]:_(s16), [[UV51:%[0-9]+]]:_(s16), [[UV52:%[0-9]+]]:_(s16), [[UV53:%[0-9]+]]:_(s16), [[UV54:%[0-9]+]]:_(s16), [[UV55:%[0-9]+]]:_(s16), [[UV56:%[0-9]+]]:_(s16), [[UV57:%[0-9]+]]:_(s16), [[UV58:%[0-9]+]]:_(s16), [[UV59:%[0-9]+]]:_(s16), [[UV60:%[0-9]+]]:_(s16), [[UV61:%[0-9]+]]:_(s16), [[UV62:%[0-9]+]]:_(s16), [[UV63:%[0-9]+]]:_(s16), [[UV64:%[0-9]+]]:_(s16), [[UV65:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<66 x s16>)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<65 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16), [[UV3]](s16), [[UV4]](s16), [[UV5]](s16), [[UV6]](s16), [[UV7]](s16), [[UV8]](s16), [[UV9]](s16), [[UV10]](s16), [[UV11]](s16), [[UV12]](s16), [[UV13]](s16), [[UV14]](s16), [[UV15]](s16), [[UV16]](s16), [[UV17]](s16), [[UV18]](s16), [[UV19]](s16), [[UV20]](s16), [[UV21]](s16), [[UV22]](s16), [[UV23]](s16), [[UV24]](s16), [[UV25]](s16), [[UV26]](s16), [[UV27]](s16), [[UV28]](s16), [[UV29]](s16), [[UV30]](s16), [[UV31]](s16), [[UV32]](s16), [[UV33]](s16), [[UV34]](s16), [[UV35]](s16), [[UV36]](s16), [[UV37]](s16), [[UV38]](s16), [[UV39]](s16), [[UV40]](s16), [[UV41]](s16), [[UV42]](s16), [[UV43]](s16), [[UV44]](s16), [[UV45]](s16), [[UV46]](s16), [[UV47]](s16), [[UV48]](s16), [[UV49]](s16), [[UV50]](s16), [[UV51]](s16), [[UV52]](s16), [[UV53]](s16), [[UV54]](s16), [[UV55]](s16), [[UV56]](s16), [[UV57]](s16), [[UV58]](s16), [[UV59]](s16), [[UV60]](s16), [[UV61]](s16), [[UV62]](s16), [[UV63]](s16), [[UV64]](s16)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<65 x s16>), [[DEF]](p1) :: (store (<65 x s16>) into `<65 x i16> addrspace(1)* undef`, align 256, addrspace 1)
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY32]]
+  ; CHECK-NEXT:   SI_RETURN
   store <65 x i16> %arg0, <65 x i16> addrspace(1)* undef
   ret void
 }
@@ -1378,16 +1256,14 @@ define void @void_func_v65i16(<65 x i16> %arg0) #0 {
 define void @void_func_v2f32(<2 x float> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x s32>), [[DEF]](p1) :: (store (<2 x s32>) into `<2 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x float> %arg0, <2 x float> addrspace(1)* undef
   ret void
 }
@@ -1395,17 +1271,15 @@ define void @void_func_v2f32(<2 x float> %arg0) #0 {
 define void @void_func_v3f32(<3 x float> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<3 x s32>), [[DEF]](p1) :: (store (<3 x s32>) into `<3 x float> addrspace(1)* undef`, align 16, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x float> %arg0, <3 x float> addrspace(1)* undef
   ret void
 }
@@ -1413,18 +1287,16 @@ define void @void_func_v3f32(<3 x float> %arg0) #0 {
 define void @void_func_v4f32(<4 x float> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v4f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<4 x s32>), [[DEF]](p1) :: (store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <4 x float> %arg0, <4 x float> addrspace(1)* undef
   ret void
 }
@@ -1432,7 +1304,7 @@ define void @void_func_v4f32(<4 x float> %arg0) #0 {
 define void @void_func_v8f32(<8 x float> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v8f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1443,11 +1315,9 @@ define void @void_func_v8f32(<8 x float> %arg0) #0 {
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<8 x s32>), [[DEF]](p1) :: (store (<8 x s32>) into `<8 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]]
+  ; CHECK-NEXT:   SI_RETURN
   store <8 x float> %arg0, <8 x float> addrspace(1)* undef
   ret void
 }
@@ -1455,7 +1325,7 @@ define void @void_func_v8f32(<8 x float> %arg0) #0 {
 define void @void_func_v16f32(<16 x float> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v16f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1474,11 +1344,9 @@ define void @void_func_v16f32(<16 x float> %arg0) #0 {
   ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY $vgpr14
   ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY $vgpr15
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32), [[COPY8]](s32), [[COPY9]](s32), [[COPY10]](s32), [[COPY11]](s32), [[COPY12]](s32), [[COPY13]](s32), [[COPY14]](s32), [[COPY15]](s32)
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<16 x s32>), [[DEF]](p1) :: (store (<16 x s32>) into `<16 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY17]]
+  ; CHECK-NEXT:   SI_RETURN
   store <16 x float> %arg0, <16 x float> addrspace(1)* undef
   ret void
 }
@@ -1486,7 +1354,7 @@ define void @void_func_v16f32(<16 x float> %arg0) #0 {
 define void @void_func_v2f64(<2 x double> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2f64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1495,11 +1363,9 @@ define void @void_func_v2f64(<2 x double> %arg0) #0 {
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x s64>), [[DEF]](p1) :: (store (<2 x s64>) into `<2 x double> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x double> %arg0, <2 x double> addrspace(1)* undef
   ret void
 }
@@ -1507,7 +1373,7 @@ define void @void_func_v2f64(<2 x double> %arg0) #0 {
 define void @void_func_v3f64(<3 x double> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3f64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1519,11 +1385,9 @@ define void @void_func_v3f64(<3 x double> %arg0) #0 {
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<3 x s64>), [[DEF]](p1) :: (store (<3 x s64>) into `<3 x double> addrspace(1)* undef`, align 32, addrspace 1)
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY7]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x double> %arg0, <3 x double> addrspace(1)* undef
   ret void
 }
@@ -1531,7 +1395,7 @@ define void @void_func_v3f64(<3 x double> %arg0) #0 {
 define void @void_func_v4f64(<4 x double> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v4f64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1546,11 +1410,9 @@ define void @void_func_v4f64(<4 x double> %arg0) #0 {
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
   ; CHECK-NEXT:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<4 x s64>), [[DEF]](p1) :: (store (<4 x s64>) into `<4 x double> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]]
+  ; CHECK-NEXT:   SI_RETURN
   store <4 x double> %arg0, <4 x double> addrspace(1)* undef
   ret void
 }
@@ -1558,7 +1420,7 @@ define void @void_func_v4f64(<4 x double> %arg0) #0 {
 define void @void_func_v8f64(<8 x double> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v8f64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1585,11 +1447,9 @@ define void @void_func_v8f64(<8 x double> %arg0) #0 {
   ; CHECK-NEXT:   [[MV6:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY12]](s32), [[COPY13]](s32)
   ; CHECK-NEXT:   [[MV7:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY14]](s32), [[COPY15]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64)
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<8 x s64>), [[DEF]](p1) :: (store (<8 x s64>) into `<8 x double> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY17]]
+  ; CHECK-NEXT:   SI_RETURN
   store <8 x double> %arg0, <8 x double> addrspace(1)* undef
   ret void
 }
@@ -1597,7 +1457,7 @@ define void @void_func_v8f64(<8 x double> %arg0) #0 {
 define void @void_func_v16f64(<16 x double> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v16f64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -1649,11 +1509,9 @@ define void @void_func_v16f64(<16 x double> %arg0) #0 {
   ; CHECK-NEXT:   [[MV14:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY28]](s32), [[COPY29]](s32)
   ; CHECK-NEXT:   [[MV15:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY30]](s32), [[LOAD]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64), [[MV2]](s64), [[MV3]](s64), [[MV4]](s64), [[MV5]](s64), [[MV6]](s64), [[MV7]](s64), [[MV8]](s64), [[MV9]](s64), [[MV10]](s64), [[MV11]](s64), [[MV12]](s64), [[MV13]](s64), [[MV14]](s64), [[MV15]](s64)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<16 x s64>), [[DEF]](p1) :: (store (<16 x s64>) into `<16 x double> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY32]]
+  ; CHECK-NEXT:   SI_RETURN
   store <16 x double> %arg0, <16 x double> addrspace(1)* undef
   ret void
 }
@@ -1661,14 +1519,12 @@ define void @void_func_v16f64(<16 x double> %arg0) #0 {
 define void @void_func_v2f16(<2 x half> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v2f16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](<2 x s16>), [[DEF]](p1) :: (store (<2 x s16>) into `<2 x half> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store <2 x half> %arg0, <2 x half> addrspace(1)* undef
   ret void
 }
@@ -1676,18 +1532,16 @@ define void @void_func_v2f16(<2 x half> %arg0) #0 {
 define void @void_func_v3f16(<3 x half> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v3f16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<3 x s16>), [[DEF]](p1) :: (store (<3 x s16>) into `<3 x half> addrspace(1)* undef`, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <3 x half> %arg0, <3 x half> addrspace(1)* undef
   ret void
 }
@@ -1695,16 +1549,14 @@ define void @void_func_v3f16(<3 x half> %arg0) #0 {
 define void @void_func_v4f16(<4 x half> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v4f16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<4 x s16>), [[DEF]](p1) :: (store (<4 x s16>) into `<4 x half> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store <4 x half> %arg0, <4 x half> addrspace(1)* undef
   ret void
 }
@@ -1712,18 +1564,16 @@ define void @void_func_v4f16(<4 x half> %arg0) #0 {
 define void @void_func_v8f16(<8 x half> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v8f16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr3
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>), [[COPY2]](<2 x s16>), [[COPY3]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<8 x s16>), [[DEF]](p1) :: (store (<8 x s16>) into `<8 x half> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   store <8 x half> %arg0, <8 x half> addrspace(1)* undef
   ret void
 }
@@ -1731,7 +1581,7 @@ define void @void_func_v8f16(<8 x half> %arg0) #0 {
 define void @void_func_v16f16(<16 x half> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v16f16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -1742,11 +1592,9 @@ define void @void_func_v16f16(<16 x half> %arg0) #0 {
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr6
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr7
   ; CHECK-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<16 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>), [[COPY2]](<2 x s16>), [[COPY3]](<2 x s16>), [[COPY4]](<2 x s16>), [[COPY5]](<2 x s16>), [[COPY6]](<2 x s16>), [[COPY7]](<2 x s16>)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[CONCAT_VECTORS]](<16 x s16>), [[DEF]](p1) :: (store (<16 x s16>) into `<16 x half> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]]
+  ; CHECK-NEXT:   SI_RETURN
   store <16 x half> %arg0, <16 x half> addrspace(1)* undef
   ret void
 }
@@ -1755,21 +1603,19 @@ define void @void_func_v16f16(<16 x half> %arg0) #0 {
 define void @void_func_i32_i64_i32(i32 %arg0, i64 %arg1, i32 %arg2) #0 {
   ; CHECK-LABEL: name: void_func_i32_i64_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[MV]](s64), [[COPY5]](p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[MV]](s64), [[COPY4]](p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   G_STORE [[COPY3]](s32), [[DEF]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY6]]
+  ; CHECK-NEXT:   SI_RETURN
   store volatile i32 %arg0, i32 addrspace(1)* undef
   store volatile i64 %arg1, i64 addrspace(1)* undef
   store volatile i32 %arg2, i32 addrspace(1)* undef
@@ -1779,14 +1625,12 @@ define void @void_func_i32_i64_i32(i32 %arg0, i64 %arg1, i32 %arg2) #0 {
 define void @void_func_struct_i32({ i32 } %arg0) #0 {
   ; CHECK-LABEL: name: void_func_struct_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (store (s32) into `{ i32 } addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   store { i32 } %arg0, { i32 } addrspace(1)* undef
   ret void
 }
@@ -1794,20 +1638,18 @@ define void @void_func_struct_i32({ i32 } %arg0) #0 {
 define void @void_func_struct_i8_i32({ i8, i32 } %arg0) #0 {
   ; CHECK-LABEL: name: void_func_struct_i8_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC]](s16)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC1]](s8), [[DEF]](p1) :: (store (s8) into `{ i8, i32 } addrspace(1)* undef`, align 4, addrspace 1)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
   ; CHECK-NEXT:   G_STORE [[COPY1]](s32), [[PTR_ADD]](p1) :: (store (s32) into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   store { i8, i32 } %arg0, { i8, i32 } addrspace(1)* undef
   ret void
 }
@@ -1815,11 +1657,8 @@ define void @void_func_struct_i8_i32({ i8, i32 } %arg0) #0 {
 define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0) #0 {
   ; CHECK-LABEL: name: void_func_byval_struct_i8_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p5) :: (dereferenceable load (s8) from %ir.arg0, align 4, addrspace 5)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
@@ -1829,8 +1668,7 @@ define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8,
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
   ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[PTR_ADD1]](p1) :: (store (s32) into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]]
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.load = load { i8, i32 }, { i8, i32 } addrspace(5)* %arg0
   store { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
   ret void
@@ -1839,14 +1677,13 @@ define void @void_func_byval_struct_i8_i32({ i8, i32 } addrspace(5)* byval({ i8,
 define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg0, { i8, i32 } addrspace(5)* byval({ i8, i32 }) %arg1, i32 %arg2) #0 {
   ; CHECK-LABEL: name: void_func_byval_struct_i8_i32_x2
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
   ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX1]](p5)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[DEF1:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p5) :: (volatile dereferenceable load (s8) from %ir.arg0, align 4, addrspace 5)
@@ -1864,8 +1701,7 @@ define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval({
   ; CHECK-NEXT:   [[PTR_ADD3:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C1]](s64)
   ; CHECK-NEXT:   G_STORE [[LOAD3]](s32), [[PTR_ADD3]](p1) :: (volatile store (s32) into `{ i8, i32 } addrspace(1)* undef` + 4, addrspace 1)
   ; CHECK-NEXT:   G_STORE [[COPY2]](s32), [[DEF1]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.load = load volatile { i8, i32 }, { i8, i32 } addrspace(5)* %arg0
   %arg1.load = load volatile { i8, i32 }, { i8, i32 } addrspace(5)* %arg1
   store volatile { i8, i32 } %arg0.load, { i8, i32 } addrspace(1)* undef
@@ -1877,21 +1713,17 @@ define void @void_func_byval_struct_i8_i32_x2({ i8, i32 } addrspace(5)* byval({
 define void @void_func_byval_i32_byval_i64(i32 addrspace(5)* byval(i32) %arg0, i64 addrspace(5)* byval(i64) %arg1) #0 {
   ; CHECK-LABEL: name: void_func_byval_i32_byval_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
   ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX1]](p5)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (dereferenceable load (s32) from %ir.arg0, addrspace 5)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p5) :: (dereferenceable load (s64) from %ir.arg1, addrspace 5)
   ; CHECK-NEXT:   G_STORE [[LOAD]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD1]](s64), [[COPY3]](p1) :: (store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   G_STORE [[LOAD1]](s64), [[COPY2]](p1) :: (store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.load = load i32, i32 addrspace(5)* %arg0
   %arg1.load = load i64, i64 addrspace(5)* %arg1
   store i32 %arg0.load, i32 addrspace(1)* undef
@@ -1902,21 +1734,17 @@ define void @void_func_byval_i32_byval_i64(i32 addrspace(5)* byval(i32) %arg0, i
 define void @void_func_byval_i8_align32_i16_align64(i8 addrspace(5)* byval(i8) %arg0, i16 addrspace(5)* byval(i16) align 64 %arg1) #0 {
   ; CHECK-LABEL: name: void_func_byval_i8_align32_i16_align64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
   ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX1]](p5)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p1) = COPY [[C]](p1)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p1) = COPY [[C]](p1)
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p5) :: (dereferenceable load (s8) from %ir.arg0, addrspace 5)
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[COPY1]](p5) :: (dereferenceable load (s16) from %ir.arg1, addrspace 5)
   ; CHECK-NEXT:   G_STORE [[LOAD]](s8), [[C]](p1) :: (store (s8) into `i8 addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD1]](s16), [[COPY3]](p1) :: (store (s16) into `i16 addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   G_STORE [[LOAD1]](s16), [[COPY2]](p1) :: (store (s16) into `i16 addrspace(1)* null`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.load = load i8, i8 addrspace(5)* %arg0
   %arg1.load = load i16, i16 addrspace(5)* %arg1
   store i8 %arg0.load, i8 addrspace(1)* null
@@ -1928,15 +1756,12 @@ define void @void_func_byval_i8_align32_i16_align64(i8 addrspace(5)* byval(i8) %
 define void @byval_a3i32_align128_byval_i16_align64([3 x i32] addrspace(5)* byval([3 x i32]) align 128 %arg0, i16 addrspace(5)* byval(i16) align 64 %arg1) #0 {
   ; CHECK-LABEL: name: byval_a3i32_align128_byval_i16_align64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
   ; CHECK-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX1]](p5)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(p1) = COPY [[C]](p1)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p1) = COPY [[C]](p1)
   ; CHECK-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p5) :: (dereferenceable load (s32) from %ir.arg0, addrspace 5)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p5) = G_PTR_ADD [[COPY]], [[C1]](s32)
@@ -1952,9 +1777,8 @@ define void @byval_a3i32_align128_byval_i16_align64([3 x i32] addrspace(5)* byva
   ; CHECK-NEXT:   [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; CHECK-NEXT:   [[C6:%[0-9]+]]:_(p1) = G_CONSTANT i64 8
   ; CHECK-NEXT:   G_STORE [[LOAD2]](s32), [[C6]](p1) :: (store (s32) into `[3 x i32] addrspace(1)* null` + 8, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD3]](s16), [[COPY3]](p1) :: (store (s16) into `i16 addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   G_STORE [[LOAD3]](s16), [[COPY2]](p1) :: (store (s16) into `i16 addrspace(1)* null`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.load = load [3 x i32], [3 x i32] addrspace(5)* %arg0
   %arg1.load = load i16, i16 addrspace(5)* %arg1
   store [3 x i32] %arg0.load, [3 x i32] addrspace(1)* null
@@ -1966,7 +1790,7 @@ define void @byval_a3i32_align128_byval_i16_align64([3 x i32] addrspace(5)* byva
 define void @void_func_v32i32_i32_byval_i8(<32 x i32> %arg0, i32 %arg1, i8 addrspace(5)* byval(i8) align 8 %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_i32_byval_i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2006,14 +1830,12 @@ define void @void_func_v32i32_i32_byval_i8(<32 x i32> %arg0, i32 %arg1, i8 addrs
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.1, addrspace 5)
   ; CHECK-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX2]](p5)
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[C]](p1)
+  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[C]](p1)
   ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[C]](p1) :: (store (s32) into `i32 addrspace(1)* null`, addrspace 1)
   ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[COPY31]](p5) :: (dereferenceable load (s8) from %ir.arg2, addrspace 5)
-  ; CHECK-NEXT:   G_STORE [[LOAD2]](s8), [[COPY33]](p1) :: (store (s8) into `i8 addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[LOAD2]](s8), [[COPY32]](p1) :: (store (s8) into `i8 addrspace(1)* null`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store i32 %arg1, i32 addrspace(1)* null
   %arg2.load = load i8, i8 addrspace(5)* %arg2
   store i8 %arg2.load, i8 addrspace(1)* null
@@ -2024,7 +1846,7 @@ define void @void_func_v32i32_i32_byval_i8(<32 x i32> %arg0, i32 %arg1, i8 addrs
 define void @void_func_v32i32_byval_i8_i32(<32 x i32> %arg0, i8 addrspace(5)* byval(i8) %arg1, i32 %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_byval_i8_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2064,14 +1886,12 @@ define void @void_func_v32i32_byval_i8_i32(<32 x i32> %arg0, i8 addrspace(5)* by
   ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX1]](p5)
   ; CHECK-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.0, align 8, addrspace 5)
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[C]](p1)
+  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[C]](p1)
   ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[C]](p1) :: (store (s32) into `i32 addrspace(1)* null`, addrspace 1)
   ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(s8) = G_LOAD [[COPY31]](p5) :: (dereferenceable load (s8) from %ir.arg1, addrspace 5)
-  ; CHECK-NEXT:   G_STORE [[LOAD2]](s8), [[COPY33]](p1) :: (store (s8) into `i8 addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY32]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[LOAD2]](s8), [[COPY32]](p1) :: (store (s8) into `i8 addrspace(1)* null`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store i32 %arg2, i32 addrspace(1)* null
   %arg1.load = load i8, i8 addrspace(5)* %arg1
   store i8 %arg1.load, i8 addrspace(1)* null
@@ -2081,7 +1901,7 @@ define void @void_func_v32i32_byval_i8_i32(<32 x i32> %arg0, i8 addrspace(5)* by
 define void @void_func_v32i32_i32_i64(<32 x i32> %arg0, i32 %arg1, i64 %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_i32_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2124,15 +1944,13 @@ define void @void_func_v32i32_i32_i64(<32 x i32> %arg0, i32 %arg1, i64 %arg2) #0
   ; CHECK-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load (s32) from %fixed-stack.0, addrspace 5)
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD2]](s32), [[LOAD3]](s32)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[COPY32]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[MV]](s64), [[COPY33]](p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[LOAD1]](s32), [[COPY31]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[MV]](s64), [[COPY32]](p1) :: (volatile store (s64) into `i64 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile i32 %arg1, i32 addrspace(1)* undef
   store volatile i64 %arg2, i64 addrspace(1)* undef
@@ -2143,7 +1961,7 @@ define void @void_func_v32i32_i32_i64(<32 x i32> %arg0, i32 %arg1, i64 %arg2) #0
 define void @void_func_v32i32_i1_i8_i16(<32 x i32> %arg0, i1 %arg1, i8 %arg2, i16 %arg3, half %arg4) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_i1_i8_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2189,19 +2007,17 @@ define void @void_func_v32i32_i1_i8_i16(<32 x i32> %arg0, i1 %arg1, i8 %arg2, i1
   ; CHECK-NEXT:   [[LOAD3:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX3]](p5) :: (invariant load (s16) from %fixed-stack.1, align 4, addrspace 5)
   ; CHECK-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD4:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load (s16) from %fixed-stack.0, align 16, addrspace 5)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY35:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[TRUNC]](s1), [[COPY32]](p1) :: (volatile store (s1) into `i1 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[TRUNC1]](s8), [[COPY33]](p1) :: (volatile store (s8) into `i8 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD3]](s16), [[COPY34]](p1) :: (volatile store (s16) into `i16 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD4]](s16), [[COPY35]](p1) :: (volatile store (s16) into `half addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY36:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY36]]
+  ; CHECK-NEXT:   G_STORE [[TRUNC]](s1), [[COPY31]](p1) :: (volatile store (s1) into `i1 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[TRUNC1]](s8), [[COPY32]](p1) :: (volatile store (s8) into `i8 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[LOAD3]](s16), [[COPY33]](p1) :: (volatile store (s16) into `i16 addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[LOAD4]](s16), [[COPY34]](p1) :: (volatile store (s16) into `half addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile i1 %arg1, i1 addrspace(1)* undef
   store volatile i8 %arg2, i8 addrspace(1)* undef
@@ -2213,7 +2029,7 @@ define void @void_func_v32i32_i1_i8_i16(<32 x i32> %arg0, i1 %arg1, i8 %arg2, i1
 define void @void_func_v32i32_p3_p5_i16(<32 x i32> %arg0, i8 addrspace(3)* %arg1, i8 addrspace(5)* %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_p3_p5_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2253,15 +2069,13 @@ define void @void_func_v32i32_p3_p5_i16(<32 x i32> %arg0, i8 addrspace(3)* %arg1
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(p3) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (p3) from %fixed-stack.1, addrspace 5)
   ; CHECK-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(p5) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (p5) from %fixed-stack.0, align 8, addrspace 5)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD1]](p3), [[COPY32]](p1) :: (volatile store (p3) into `i8 addrspace(3)* addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD2]](p5), [[COPY33]](p1) :: (volatile store (p5) into `i8 addrspace(5)* addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[LOAD1]](p3), [[COPY31]](p1) :: (volatile store (p3) into `i8 addrspace(3)* addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[LOAD2]](p5), [[COPY32]](p1) :: (volatile store (p5) into `i8 addrspace(5)* addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile i8 addrspace(3)* %arg1, i8 addrspace(3)* addrspace(1)* undef
   store volatile i8 addrspace(5)* %arg2, i8 addrspace(5)* addrspace(1)* undef
@@ -2271,7 +2085,7 @@ define void @void_func_v32i32_p3_p5_i16(<32 x i32> %arg0, i8 addrspace(3)* %arg1
 define void @void_func_v32i32_v2i32_v2f32(<32 x i32> %arg0, <2 x i32> %arg1, <2 x float> %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_v2i32_v2f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2317,15 +2131,13 @@ define void @void_func_v32i32_v2i32_v2f32(<32 x i32> %arg0, <2 x i32> %arg1, <2
   ; CHECK-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD4:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX4]](p5) :: (invariant load (s32) from %fixed-stack.0, align 16, addrspace 5)
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[LOAD3]](s32), [[LOAD4]](s32)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<2 x s32>), [[COPY32]](p1) :: (volatile store (<2 x s32>) into `<2 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<2 x s32>), [[COPY33]](p1) :: (volatile store (<2 x s32>) into `<2 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<2 x s32>), [[COPY31]](p1) :: (volatile store (<2 x s32>) into `<2 x i32> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<2 x s32>), [[COPY32]](p1) :: (volatile store (<2 x s32>) into `<2 x float> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile <2 x i32> %arg1, <2 x i32> addrspace(1)* undef
   store volatile <2 x float> %arg2, <2 x float> addrspace(1)* undef
@@ -2335,7 +2147,7 @@ define void @void_func_v32i32_v2i32_v2f32(<32 x i32> %arg0, <2 x i32> %arg1, <2
 define void @void_func_v32i32_v2i16_v2f16(<32 x i32> %arg0, <2 x i16> %arg1, <2 x half> %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_v2i16_v2f16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2375,15 +2187,13 @@ define void @void_func_v32i32_v2i16_v2f16(<32 x i32> %arg0, <2 x i16> %arg1, <2
   ; CHECK-NEXT:   [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (<2 x s16>) from %fixed-stack.1, addrspace 5)
   ; CHECK-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD2:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (<2 x s16>) from %fixed-stack.0, align 8, addrspace 5)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD1]](<2 x s16>), [[COPY32]](p1) :: (volatile store (<2 x s16>) into `<2 x i16> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[LOAD2]](<2 x s16>), [[COPY33]](p1) :: (volatile store (<2 x s16>) into `<2 x half> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[LOAD1]](<2 x s16>), [[COPY31]](p1) :: (volatile store (<2 x s16>) into `<2 x i16> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[LOAD2]](<2 x s16>), [[COPY32]](p1) :: (volatile store (<2 x s16>) into `<2 x half> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile <2 x i16> %arg1, <2 x i16> addrspace(1)* undef
   store volatile <2 x half> %arg2, <2 x half> addrspace(1)* undef
@@ -2393,7 +2203,7 @@ define void @void_func_v32i32_v2i16_v2f16(<32 x i32> %arg0, <2 x i16> %arg1, <2
 define void @void_func_v32i32_v2i64_v2f64(<32 x i32> %arg0, <2 x i64> %arg1, <2 x double> %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_v2i64_v2f64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2451,15 +2261,13 @@ define void @void_func_v32i32_v2i64_v2f64(<32 x i32> %arg0, <2 x i64> %arg1, <2
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD5]](s32), [[LOAD6]](s32)
   ; CHECK-NEXT:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[LOAD7]](s32), [[LOAD8]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV3]](s64)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<2 x s64>), [[COPY32]](p1) :: (volatile store (<2 x s64>) into `<2 x i64> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<2 x s64>), [[COPY33]](p1) :: (volatile store (<2 x s64>) into `<2 x double> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<2 x s64>), [[COPY31]](p1) :: (volatile store (<2 x s64>) into `<2 x i64> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<2 x s64>), [[COPY32]](p1) :: (volatile store (<2 x s64>) into `<2 x double> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile <2 x i64> %arg1, <2 x i64> addrspace(1)* undef
   store volatile <2 x double> %arg2, <2 x double> addrspace(1)* undef
@@ -2469,7 +2277,7 @@ define void @void_func_v32i32_v2i64_v2f64(<32 x i32> %arg0, <2 x i64> %arg1, <2
 define void @void_func_v32i32_v4i32_v4f32(<32 x i32> %arg0, <4 x i32> %arg1, <4 x float> %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_v4i32_v4f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2523,15 +2331,13 @@ define void @void_func_v32i32_v4i32_v4f32(<32 x i32> %arg0, <4 x i32> %arg1, <4
   ; CHECK-NEXT:   [[FRAME_INDEX8:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD8:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX8]](p5) :: (invariant load (s32) from %fixed-stack.0, align 16, addrspace 5)
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[LOAD5]](s32), [[LOAD6]](s32), [[LOAD7]](s32), [[LOAD8]](s32)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[COPY32]](p1) :: (volatile store (<4 x s32>) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[COPY33]](p1) :: (volatile store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<4 x s32>), [[COPY31]](p1) :: (volatile store (<4 x s32>) into `<4 x i32> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<4 x s32>), [[COPY32]](p1) :: (volatile store (<4 x s32>) into `<4 x float> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile <4 x i32> %arg1, <4 x i32> addrspace(1)* undef
   store volatile <4 x float> %arg2, <4 x float> addrspace(1)* undef
@@ -2541,7 +2347,7 @@ define void @void_func_v32i32_v4i32_v4f32(<32 x i32> %arg0, <4 x i32> %arg1, <4
 define void @void_func_v32i32_v8i32_v8f32(<32 x i32> %arg0, <8 x i32> %arg1, <8 x float> %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_v8i32_v8f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2611,15 +2417,13 @@ define void @void_func_v32i32_v8i32_v8f32(<32 x i32> %arg0, <8 x i32> %arg1, <8
   ; CHECK-NEXT:   [[FRAME_INDEX16:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD16:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX16]](p5) :: (invariant load (s32) from %fixed-stack.0, align 16, addrspace 5)
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[LOAD9]](s32), [[LOAD10]](s32), [[LOAD11]](s32), [[LOAD12]](s32), [[LOAD13]](s32), [[LOAD14]](s32), [[LOAD15]](s32), [[LOAD16]](s32)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<8 x s32>), [[COPY32]](p1) :: (volatile store (<8 x s32>) into `<8 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<8 x s32>), [[COPY33]](p1) :: (volatile store (<8 x s32>) into `<8 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<8 x s32>), [[COPY31]](p1) :: (volatile store (<8 x s32>) into `<8 x i32> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<8 x s32>), [[COPY32]](p1) :: (volatile store (<8 x s32>) into `<8 x float> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile <8 x i32> %arg1, <8 x i32> addrspace(1)* undef
   store volatile <8 x float> %arg2, <8 x float> addrspace(1)* undef
@@ -2629,7 +2433,7 @@ define void @void_func_v32i32_v8i32_v8f32(<32 x i32> %arg0, <8 x i32> %arg1, <8
 define void @void_func_v32i32_v16i32_v16f32(<32 x i32> %arg0, <16 x i32> %arg1, <16 x float> %arg2) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_v16i32_v16f32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2731,15 +2535,13 @@ define void @void_func_v32i32_v16i32_v16f32(<32 x i32> %arg0, <16 x i32> %arg1,
   ; CHECK-NEXT:   [[FRAME_INDEX32:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; CHECK-NEXT:   [[LOAD32:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX32]](p5) :: (invariant load (s32) from %fixed-stack.0, align 16, addrspace 5)
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<16 x s32>) = G_BUILD_VECTOR [[LOAD17]](s32), [[LOAD18]](s32), [[LOAD19]](s32), [[LOAD20]](s32), [[LOAD21]](s32), [[LOAD22]](s32), [[LOAD23]](s32), [[LOAD24]](s32), [[LOAD25]](s32), [[LOAD26]](s32), [[LOAD27]](s32), [[LOAD28]](s32), [[LOAD29]](s32), [[LOAD30]](s32), [[LOAD31]](s32), [[LOAD32]](s32)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<16 x s32>), [[COPY32]](p1) :: (volatile store (<16 x s32>) into `<16 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<16 x s32>), [[COPY33]](p1) :: (volatile store (<16 x s32>) into `<16 x float> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY34:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY34]]
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<16 x s32>), [[COPY31]](p1) :: (volatile store (<16 x s32>) into `<16 x i32> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR2]](<16 x s32>), [[COPY32]](p1) :: (volatile store (<16 x s32>) into `<16 x float> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile <16 x i32> %arg1, <16 x i32> addrspace(1)* undef
   store volatile <16 x float> %arg2, <16 x float> addrspace(1)* undef
@@ -2750,28 +2552,26 @@ define void @void_func_v32i32_v16i32_v16f32(<32 x i32> %arg0, <16 x i32> %arg1,
 define void @void_func_v3f32_wasted_reg(<3 x float> %arg0, i32 %arg1) #0 {
   ; CHECK-LABEL: name: void_func_v3f32_wasted_reg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p3) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p3) = COPY [[DEF]](p3)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(p3) = COPY [[DEF]](p3)
   ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C]](s32)
   ; CHECK-NEXT:   [[EVEC1:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C1]](s32)
   ; CHECK-NEXT:   [[EVEC2:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[BUILD_VECTOR]](<3 x s32>), [[C2]](s32)
   ; CHECK-NEXT:   G_STORE [[EVEC]](s32), [[DEF]](p3) :: (volatile store (s32) into `float addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   G_STORE [[EVEC1]](s32), [[DEF]](p3) :: (volatile store (s32) into `float addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   G_STORE [[EVEC2]](s32), [[DEF]](p3) :: (volatile store (s32) into `float addrspace(3)* undef`, addrspace 3)
-  ; CHECK-NEXT:   G_STORE [[COPY3]](s32), [[COPY5]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY6]]
+  ; CHECK-NEXT:   G_STORE [[COPY3]](s32), [[COPY4]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.0 = extractelement <3 x float> %arg0, i32 0
   %arg0.1 = extractelement <3 x float> %arg0, i32 1
   %arg0.2 = extractelement <3 x float> %arg0, i32 2
@@ -2785,14 +2585,13 @@ define void @void_func_v3f32_wasted_reg(<3 x float> %arg0, i32 %arg1) #0 {
 define void @void_func_v3i32_wasted_reg(<3 x i32> %arg0, i32 %arg1) #0 {
   ; CHECK-LABEL: name: void_func_v3i32_wasted_reg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32)
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
@@ -2804,8 +2603,7 @@ define void @void_func_v3i32_wasted_reg(<3 x i32> %arg0, i32 %arg1) #0 {
   ; CHECK-NEXT:   G_STORE [[EVEC1]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   G_STORE [[EVEC2]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
   ; CHECK-NEXT:   G_STORE [[COPY3]](s32), [[DEF]](p3) :: (volatile store (s32) into `i32 addrspace(3)* undef`, addrspace 3)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]]
+  ; CHECK-NEXT:   SI_RETURN
   %arg0.0 = extractelement <3 x i32> %arg0, i32 0
   %arg0.1 = extractelement <3 x i32> %arg0, i32 1
   %arg0.2 = extractelement <3 x i32> %arg0, i32 2
@@ -2820,7 +2618,7 @@ define void @void_func_v3i32_wasted_reg(<3 x i32> %arg0, i32 %arg1) #0 {
 define void @void_func_v16i8(<16 x i8> %arg0) #0 {
   ; CHECK-LABEL: name: void_func_v16i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
@@ -2856,11 +2654,9 @@ define void @void_func_v16i8(<16 x i8> %arg0) #0 {
   ; CHECK-NEXT:   [[TRUNC15:%[0-9]+]]:_(s16) = G_TRUNC [[COPY15]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[TRUNC3]](s16), [[TRUNC4]](s16), [[TRUNC5]](s16), [[TRUNC6]](s16), [[TRUNC7]](s16), [[TRUNC8]](s16), [[TRUNC9]](s16), [[TRUNC10]](s16), [[TRUNC11]](s16), [[TRUNC12]](s16), [[TRUNC13]](s16), [[TRUNC14]](s16), [[TRUNC15]](s16)
   ; CHECK-NEXT:   [[TRUNC16:%[0-9]+]]:_(<16 x s8>) = G_TRUNC [[BUILD_VECTOR]](<16 x s16>)
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[TRUNC16]](<16 x s8>), [[DEF]](p1) :: (volatile store (<16 x s8>) into `<16 x i8> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY16]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY17]]
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <16 x i8> %arg0, <16 x i8> addrspace(1)* undef
   ret void
 }
@@ -2869,7 +2665,7 @@ define void @void_func_v16i8(<16 x i8> %arg0) #0 {
 define void @void_func_v32i32_v16i8(<32 x i32> %arg0, <16 x i8> %arg1) #0 {
   ; CHECK-LABEL: name: void_func_v32i32_v16i8
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2939,13 +2735,11 @@ define void @void_func_v32i32_v16i8(<32 x i32> %arg0, <16 x i8> %arg1) #0 {
   ; CHECK-NEXT:   [[LOAD16:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX16]](p5) :: (invariant load (s16) from %fixed-stack.0, align 16, addrspace 5)
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s16>) = G_BUILD_VECTOR [[LOAD1]](s16), [[LOAD2]](s16), [[LOAD3]](s16), [[LOAD4]](s16), [[LOAD5]](s16), [[LOAD6]](s16), [[LOAD7]](s16), [[LOAD8]](s16), [[LOAD9]](s16), [[LOAD10]](s16), [[LOAD11]](s16), [[LOAD12]](s16), [[LOAD13]](s16), [[LOAD14]](s16), [[LOAD15]](s16), [[LOAD16]](s16)
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(<16 x s8>) = G_TRUNC [[BUILD_VECTOR1]](<16 x s16>)
-  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY32:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
+  ; CHECK-NEXT:   [[COPY31:%[0-9]+]]:_(p1) = COPY [[DEF]](p1)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<32 x s32>), [[DEF]](p1) :: (volatile store (<32 x s32>) into `<32 x i32> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[TRUNC]](<16 x s8>), [[COPY32]](p1) :: (volatile store (<16 x s8>) into `<16 x i8> addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY33:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY33]]
+  ; CHECK-NEXT:   G_STORE [[TRUNC]](<16 x s8>), [[COPY31]](p1) :: (volatile store (<16 x s8>) into `<16 x i8> addrspace(1)* undef`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   store volatile <32 x i32> %arg0, <32 x i32> addrspace(1)* undef
   store volatile <16 x i8> %arg1, <16 x i8> addrspace(1)* undef
   ret void
@@ -2954,7 +2748,7 @@ define void @void_func_v32i32_v16i8(<32 x i32> %arg0, <16 x i8> %arg1) #0 {
 define void @pointer_in_struct_argument({i8 addrspace(3)*, i8 addrspace(1)*} %arg0, i8 %pad, {i8 addrspace(3)*, i8 addrspace(1234)*} %arg1) {
   ; CHECK-LABEL: name: pointer_in_struct_argument
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -2967,18 +2761,16 @@ define void @pointer_in_struct_argument({i8 addrspace(3)*, i8 addrspace(1)*} %ar
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(p1234) = G_MERGE_VALUES [[COPY5]](s32), [[COPY6]](s32)
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p1) = G_CONSTANT i64 0
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(p1) = COPY [[C]](p1)
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p1) = COPY [[C]](p1)
   ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p1) = COPY [[C]](p1)
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p1) = COPY [[C]](p1)
   ; CHECK-NEXT:   G_STORE [[COPY]](p3), [[C]](p1) :: (volatile store (p3) into `i8 addrspace(3)* addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[MV]](p1), [[COPY8]](p1) :: (volatile store (p1) into `i8 addrspace(1)* addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[TRUNC1]](s8), [[COPY9]](p1) :: (volatile store (s8) into `i8 addrspace(1)* null`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[MV]](p1), [[COPY7]](p1) :: (volatile store (p1) into `i8 addrspace(1)* addrspace(1)* null`, addrspace 1)
+  ; CHECK-NEXT:   G_STORE [[TRUNC1]](s8), [[COPY8]](p1) :: (volatile store (s8) into `i8 addrspace(1)* null`, addrspace 1)
   ; CHECK-NEXT:   G_STORE [[COPY4]](p3), [[C]](p1) :: (volatile store (p3) into `i8 addrspace(3)* addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[MV1]](p1234), [[COPY10]](p1) :: (volatile store (p1234) into `i8 addrspace(1234)* addrspace(1)* null`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY7]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY11]]
+  ; CHECK-NEXT:   G_STORE [[MV1]](p1234), [[COPY9]](p1) :: (volatile store (p1234) into `i8 addrspace(1234)* addrspace(1)* null`, addrspace 1)
+  ; CHECK-NEXT:   SI_RETURN
   %val0 = extractvalue {i8 addrspace(3)*, i8 addrspace(1)*} %arg0, 0
   %val1 = extractvalue {i8 addrspace(3)*, i8 addrspace(1)*} %arg0, 1
   %val2 = extractvalue {i8 addrspace(3)*, i8 addrspace(1234)*} %arg1, 0
@@ -2994,7 +2786,7 @@ define void @pointer_in_struct_argument({i8 addrspace(3)*, i8 addrspace(1)*} %ar
 define void @vector_ptr_in_struct_arg({ <2 x i8 addrspace(1)*>, <2 x i8 addrspace(3)*> } %arg) {
   ; CHECK-LABEL: name: vector_ptr_in_struct_arg
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -3006,14 +2798,12 @@ define void @vector_ptr_in_struct_arg({ <2 x i8 addrspace(1)*>, <2 x i8 addrspac
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(p3) = COPY $vgpr4
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(p3) = COPY $vgpr5
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x p3>) = G_BUILD_VECTOR [[COPY4]](p3), [[COPY5]](p3)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR]](<2 x p1>), [[DEF]](p1) :: (store (<2 x p1>) into `{ <2 x i8 addrspace(1)*>, <2 x i8 addrspace(3)*> } addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p1) = G_PTR_ADD [[DEF]], [[C]](s64)
   ; CHECK-NEXT:   G_STORE [[BUILD_VECTOR1]](<2 x p3>), [[PTR_ADD]](p1) :: (store (<2 x p3>) into `{ <2 x i8 addrspace(1)*>, <2 x i8 addrspace(3)*> } addrspace(1)* undef` + 16, align 16, addrspace 1)
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY7]]
+  ; CHECK-NEXT:   SI_RETURN
   store { <2 x i8 addrspace(1)*>, <2 x i8 addrspace(3)*> } %arg, { <2 x i8 addrspace(1)*>, <2 x i8 addrspace(3)*> } addrspace(1)* undef
   ret void
 }
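Across every function in this test the rewrite is the same mechanical pattern: the $sgpr30_sgpr31 live-in drops out of the liveins list, both the sgpr_64 copy of $sgpr30_sgpr31 and the ccr_sgpr_64 copy feeding the return disappear, the surviving virtual registers renumber down accordingly, and the block now terminates in the operand-less SI_RETURN pseudo instead of S_SETPC_B64_return. As a minimal sketch of the post-patch shape (the function name and exact CHECK lines below are illustrative only, not taken from the patch):

define void @example_void_func_i32(i32 %arg0) {
  ; CHECK-LABEL: name: example_void_func_i32
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   liveins: $vgpr0
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
  ; CHECK-NEXT:   G_STORE [[COPY]](s32), [[DEF]](p1) :: (volatile store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
  ; CHECK-NEXT:   SI_RETURN
  store volatile i32 %arg0, i32 addrspace(1)* undef
  ret void
}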

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
index 45f1baf1a0a23..aa9926b18e63c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-getelementptr.ll
@@ -5,7 +5,7 @@
 define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64(<2 x i32 addrspace(1)*> %ptr, <2 x i64> %idx) {
   ; CHECK-LABEL: name: vector_gep_v2p1_index_v2i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -21,19 +21,17 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64(<2 x i32 addrspace(1
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
   ; CHECK-NEXT:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV3]](s64)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY9]](<2 x p1>)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY8]](<2 x p1>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY10]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %gep = getelementptr i32, <2 x i32 addrspace(1)*> %ptr, <2 x i64> %idx
   ret <2 x i32 addrspace(1)*> %gep
 }
@@ -42,7 +40,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64(<2 x i32 addrspace(1
 define <2 x i32 addrspace(3)*> @vector_gep_v2p3_index_v2i32(<2 x i32 addrspace(3)*> %ptr, <2 x i32> %idx) {
   ; CHECK-LABEL: name: vector_gep_v2p3_index_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(p3) = COPY $vgpr1
@@ -50,17 +48,15 @@ define <2 x i32 addrspace(3)*> @vector_gep_v2p3_index_v2i32(<2 x i32 addrspace(3
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(<2 x s32>) = G_MUL [[BUILD_VECTOR1]], [[BUILD_VECTOR2]]
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p3>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s32>)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<2 x p3>) = COPY [[PTR_ADD]](<2 x p3>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY5]](<2 x p3>)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(<2 x p3>) = COPY [[PTR_ADD]](<2 x p3>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY4]](<2 x p3>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY6]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %gep = getelementptr i32, <2 x i32 addrspace(3)*> %ptr, <2 x i32> %idx
   ret <2 x i32 addrspace(3)*> %gep
 }
@@ -69,7 +65,7 @@ define <2 x i32 addrspace(3)*> @vector_gep_v2p3_index_v2i32(<2 x i32 addrspace(3
 define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i32(<2 x i32 addrspace(1)*> %ptr, <2 x i32> %idx) {
   ; CHECK-LABEL: name: vector_gep_v2p1_index_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -81,20 +77,18 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i32(<2 x i32 addrspace(1
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY4]](s32), [[COPY5]](s32)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[BUILD_VECTOR1]](<2 x s32>)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[SEXT]], [[BUILD_VECTOR2]]
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY7]](<2 x p1>)
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY6]](<2 x p1>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY8]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %gep = getelementptr i32, <2 x i32 addrspace(1)*> %ptr, <2 x i32> %idx
   ret <2 x i32 addrspace(1)*> %gep
 }
@@ -103,7 +97,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i32(<2 x i32 addrspace(1
 define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i64(<2 x i32 addrspace(1)*> %ptr, i64 %idx) {
   ; CHECK-LABEL: name: vector_gep_v2p1_index_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -115,21 +109,19 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i64(<2 x i32 addrspace(1)*
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV2]](s64)
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<2 x s64>) = COPY [[BUILD_VECTOR1]](<2 x s64>)
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<2 x s64>) = COPY [[BUILD_VECTOR1]](<2 x s64>)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[COPY7]], [[BUILD_VECTOR2]]
+  ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[COPY6]], [[BUILD_VECTOR2]]
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY8]](<2 x p1>)
+  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY7]](<2 x p1>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY9]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %gep = getelementptr i32, <2 x i32 addrspace(1)*> %ptr, i64 %idx
   ret <2 x i32 addrspace(1)*> %gep
 }
@@ -138,7 +130,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i64(<2 x i32 addrspace(1)*
 define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i32(<2 x i32 addrspace(1)*> %ptr, i32 %idx) {
   ; CHECK-LABEL: name: vector_gep_v2p1_index_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -148,21 +140,19 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i32(<2 x i32 addrspace(1)*
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p1>) = G_BUILD_VECTOR [[MV]](p1), [[MV1]](p1)
   ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY4]](s32), [[COPY4]](s32)
   ; CHECK-NEXT:   [[SEXT:%[0-9]+]]:_(<2 x s64>) = G_SEXT [[BUILD_VECTOR1]](<2 x s32>)
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
   ; CHECK-NEXT:   [[MUL:%[0-9]+]]:_(<2 x s64>) = G_MUL [[SEXT]], [[BUILD_VECTOR2]]
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[MUL]](<2 x s64>)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY6]](<2 x p1>)
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY5]](<2 x p1>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY5]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %gep = getelementptr i32, <2 x i32 addrspace(1)*> %ptr, i32 %idx
   ret <2 x i32 addrspace(1)*> %gep
 }
@@ -171,7 +161,7 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_i32(<2 x i32 addrspace(1)*
 define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64_constant(<2 x i32 addrspace(1)*> %ptr, <2 x i64> %idx) {
   ; CHECK-LABEL: name: vector_gep_v2p1_index_v2i64_constant
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -187,7 +177,6 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64_constant(<2 x i32 ad
   ; CHECK-NEXT:   [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
   ; CHECK-NEXT:   [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV2]](s64), [[MV3]](s64)
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
   ; CHECK-NEXT:   [[BUILD_VECTOR2:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C1]](s64)
@@ -195,16 +184,15 @@ define <2 x i32 addrspace(1)*> @vector_gep_v2p1_index_v2i64_constant(<2 x i32 ad
   ; CHECK-NEXT:   [[BUILD_VECTOR3:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C2]](s64), [[C2]](s64)
   ; CHECK-NEXT:   [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
   ; CHECK-NEXT:   [[BUILD_VECTOR4:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C2]](s64), [[C3]](s64)
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(<2 x s64>) = COPY [[BUILD_VECTOR4]](<2 x s64>)
-  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[COPY9]](<2 x s64>)
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY10]](<2 x p1>)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(<2 x s64>) = COPY [[BUILD_VECTOR4]](<2 x s64>)
+  ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p1>) = G_PTR_ADD [[BUILD_VECTOR]], [[COPY8]](<2 x s64>)
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(<2 x p1>) = COPY [[PTR_ADD]](<2 x p1>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY9]](<2 x p1>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY11]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %gep = getelementptr i32, <2 x i32 addrspace(1)*> %ptr, <2 x i64> <i64 1, i64 2>
   ret <2 x i32 addrspace(1)*> %gep
 }
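For value-returning functions, the hunks above show the second half of the pattern: the copies into the return-value VGPRs are untouched, and SI_RETURN now carries them as implicit uses exactly where S_SETPC_B64_return previously listed them after its address operand. A reduced illustration (hypothetical function, partial CHECK matches):

define i32 addrspace(1)* @example_return_p1(i32 addrspace(1)* %ptr) {
  ; CHECK: $vgpr0 = COPY
  ; CHECK-NEXT:   $vgpr1 = COPY
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
  ret i32 addrspace(1)* %ptr
}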

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
index cc2c95324c2ed..f0383259abb7b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-indirect-call.ll
@@ -57,19 +57,17 @@ define amdgpu_kernel void @test_indirect_call_sgpr_ptr(void()* %fptr) {
 define amdgpu_gfx void @test_gfx_indirect_call_sgpr_ptr(void()* %fptr) {
   ; CHECK-LABEL: name: test_gfx_indirect_call_sgpr_ptr
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
   ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[MV]](p0), 0, csr_amdgpu_si_gfx, implicit $sgpr0_sgpr1_sgpr2_sgpr3
   ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:gfx_ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return_gfx [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call amdgpu_gfx void %fptr()
   ret void
 }
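Note that the call site itself is unchanged: $sgpr30_sgpr31 = G_SI_CALL still records that the callee defines the return-address pair, while the caller no longer threads its own incoming $sgpr30_sgpr31 through sgpr_64/gfx_ccr_sgpr_64 copies and simply ends in SI_RETURN. A hypothetical reduced form of the same shape:

define amdgpu_gfx void @example_gfx_indirect_call(void()* %fptr) {
  ; CHECK: $sgpr30_sgpr31 = G_SI_CALL {{%[0-9]+}}(p0), 0, csr_amdgpu_si_gfx
  ; CHECK: SI_RETURN
  call amdgpu_gfx void %fptr()
  ret void
}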

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
index 038b0082c2b17..273498491cd33 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-inline-asm.ll
@@ -51,16 +51,12 @@ define amdgpu_kernel void @asm_simple_agpr_clobber() {
 define i32 @asm_vgpr_early_clobber() {
   ; CHECK-LABEL: name: asm_vgpr_early_clobber
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 7; v_mov_b32 $1, 7", 1 /* sideeffect attdialect */, 1835019 /* regdef-ec:VGPR_32 */, def early-clobber %1, 1835019 /* regdef-ec:VGPR_32 */, def early-clobber %2, !0
+  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 7; v_mov_b32 $1, 7", 1 /* sideeffect attdialect */, 1835019 /* regdef-ec:VGPR_32 */, def early-clobber %0, 1835019 /* regdef-ec:VGPR_32 */, def early-clobber %1, !0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY %2
-  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY2]]
+  ; CHECK-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[ADD]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   call { i32, i32 } asm sideeffect "v_mov_b32 $0, 7; v_mov_b32 $1, 7", "=&v,=&v"(), !srcloc !0
   %asmresult = extractvalue { i32, i32 } %1, 0
   %asmresult1 = extractvalue { i32, i32 } %1, 1
@@ -71,14 +67,10 @@ define i32 @asm_vgpr_early_clobber() {
 define i32 @test_specific_vgpr_output() nounwind {
   ; CHECK-LABEL: name: test_specific_vgpr_output
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   INLINEASM &"v_mov_b32 v1, 7", 0 /* attdialect */, 10 /* regdef */, implicit-def $vgpr1
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr1
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %0 = tail call i32 asm "v_mov_b32 v1, 7", "={v1}"() nounwind
   ret i32 %0
@@ -87,14 +79,10 @@ entry:
 define i32 @test_single_vgpr_output() nounwind {
   ; CHECK-LABEL: name: test_single_vgpr_output
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 7", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %1
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 7", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %0 = tail call i32 asm "v_mov_b32 $0, 7", "=v"() nounwind
   ret i32 %0
@@ -103,14 +91,10 @@ entry:
 define i32 @test_single_sgpr_output_s32() nounwind {
   ; CHECK-LABEL: name: test_single_sgpr_output_s32
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %1
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %0 = tail call i32 asm "s_mov_b32 $0, 7", "=s"() nounwind
   ret i32 %0
@@ -120,16 +104,12 @@ entry:
 define float @test_multiple_register_outputs_same() #0 {
   ; CHECK-LABEL: name: test_multiple_register_outputs_same
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 0; v_mov_b32 $1, 1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %1, 1835018 /* regdef:VGPR_32 */, def %2
+  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 0; v_mov_b32 $1, 1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %0, 1835018 /* regdef:VGPR_32 */, def %1
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY %2
-  ; CHECK-NEXT:   [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY1]], [[COPY2]]
+  ; CHECK-NEXT:   [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[FADD]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %1 = call { float, float } asm "v_mov_b32 $0, 0; v_mov_b32 $1, 1", "=v,=v"()
   %asmresult = extractvalue { float, float } %1, 0
   %asmresult1 = extractvalue { float, float } %1, 1
@@ -141,17 +121,13 @@ define float @test_multiple_register_outputs_same() #0 {
 define double @test_multiple_register_outputs_mixed() #0 {
   ; CHECK-LABEL: name: test_multiple_register_outputs_mixed
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 0; v_add_f64 $1, 0, 0", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %1, 2949130 /* regdef:VReg_64 */, def %2
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY %2
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY2]](s64)
+  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, 0; v_add_f64 $1, 0, 0", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %0, 2949130 /* regdef:VReg_64 */, def %1
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY %1
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %1 = call { float, double } asm "v_mov_b32 $0, 0; v_add_f64 $1, 0, 0", "=v,=v"()
   %asmresult = extractvalue { float, double } %1, 1
   ret double %asmresult
@@ -161,16 +137,12 @@ define double @test_multiple_register_outputs_mixed() #0 {
 define float @test_vector_output() nounwind {
   ; CHECK-LABEL: name: test_vector_output
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; CHECK-NEXT:   INLINEASM &"v_add_f64 $0, 0, 0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $vgpr14_vgpr15
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr14_vgpr15
-  ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY1]](<2 x s32>), [[C]](s32)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr14_vgpr15
+  ; CHECK-NEXT:   [[EVEC:%[0-9]+]]:_(s32) = G_EXTRACT_VECTOR_ELT [[COPY]](<2 x s32>), [[C]](s32)
   ; CHECK-NEXT:   $vgpr0 = COPY [[EVEC]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %1 = tail call <2 x float> asm sideeffect "v_add_f64 $0, 0, 0", "={v[14:15]}"() nounwind
   %2 = extractelement <2 x float> %1, i32 0
   ret float %2
@@ -212,16 +184,14 @@ define amdgpu_kernel void @test_input_imm() {
 define float @test_input_vgpr(i32 %src) nounwind {
   ; CHECK-LABEL: name: test_input_vgpr
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   INLINEASM &"v_add_f32 $0, 1.0, $1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %2, 1835017 /* reguse:VGPR_32 */, [[COPY2]]
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY %2
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   INLINEASM &"v_add_f32 $0, 1.0, $1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %1, 1835017 /* reguse:VGPR_32 */, [[COPY1]]
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY %1
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY2]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %0 = tail call float asm "v_add_f32 $0, 1.0, $1", "=v,v"(i32 %src) nounwind
   ret float %0
@@ -230,15 +200,13 @@ entry:
 define i32 @test_memory_constraint(i32 addrspace(3)* %a) nounwind {
   ; CHECK-LABEL: name: test_memory_constraint
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"ds_read_b32 $0, $1", 8 /* mayload attdialect */, 1835018 /* regdef:VGPR_32 */, def %2, 196622 /* mem:m */, [[COPY]](p3)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY %2
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   INLINEASM &"ds_read_b32 $0, $1", 8 /* mayload attdialect */, 1835018 /* regdef:VGPR_32 */, def %1, 196622 /* mem:m */, [[COPY]](p3)
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY1]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %1 = tail call i32 asm "ds_read_b32 $0, $1", "=v,*m"(i32 addrspace(3)* elementtype(i32) %a)
   ret i32 %1
 }
@@ -246,18 +214,16 @@ define i32 @test_memory_constraint(i32 addrspace(3)* %a) nounwind {
 define i32 @test_vgpr_matching_constraint(i32 %a) nounwind {
   ; CHECK-LABEL: name: test_vgpr_matching_constraint
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
   ; CHECK-NEXT:   [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C]]
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[AND]](s32)
-  ; CHECK-NEXT:   INLINEASM &";", 1 /* sideeffect attdialect */, 1835018 /* regdef:VGPR_32 */, def %4, 2147483657 /* reguse tiedto:$0 */, [[COPY2]](tied-def 3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY %4
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[AND]](s32)
+  ; CHECK-NEXT:   INLINEASM &";", 1 /* sideeffect attdialect */, 1835018 /* regdef:VGPR_32 */, def %3, 2147483657 /* reguse tiedto:$0 */, [[COPY1]](tied-def 3)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY %3
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY2]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %and = and i32 %a, 1
   %asm = call i32 asm sideeffect ";", "=v,0"(i32 %and)
   ret i32 %asm
@@ -266,20 +232,16 @@ define i32 @test_vgpr_matching_constraint(i32 %a) nounwind {
 define i32 @test_sgpr_matching_constraint() nounwind {
   ; CHECK-LABEL: name: test_sgpr_matching_constraint
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %1
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
-  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 8", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %3
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY %3
+  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
+  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 8", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %2
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %2
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sreg_32 = COPY [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sreg_32 = COPY [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sreg_32 = COPY [[COPY2]](s32)
-  ; CHECK-NEXT:   INLINEASM &"s_add_u32 $0, $1, $2", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %5, 1966089 /* reguse:SReg_32 */, [[COPY3]], 2147483657 /* reguse tiedto:$0 */, [[COPY4]](tied-def 3)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY %5
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY5]](s32)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY6]], implicit $vgpr0
+  ; CHECK-NEXT:   INLINEASM &"s_add_u32 $0, $1, $2", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %4, 1966089 /* reguse:SReg_32 */, [[COPY2]], 2147483657 /* reguse tiedto:$0 */, [[COPY3]](tied-def 3)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY %4
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY4]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %asm0 = tail call i32 asm "s_mov_b32 $0, 7", "=s"() nounwind
   %asm1 = tail call i32 asm "s_mov_b32 $0, 8", "=s"() nounwind
@@ -290,25 +252,23 @@ entry:
 define void @test_many_matching_constraints(i32 %a, i32 %b, i32 %c) nounwind {
   ; CHECK-LABEL: name: test_many_matching_constraints
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[DEF:%[0-9]+]]:_(p1) = G_IMPLICIT_DEF
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:vgpr_32 = COPY [[COPY1]](s32)
-  ; CHECK-NEXT:   INLINEASM &"; ", 1 /* sideeffect attdialect */, 1835018 /* regdef:VGPR_32 */, def %4, 1835018 /* regdef:VGPR_32 */, def %5, 1835018 /* regdef:VGPR_32 */, def %6, 2147483657 /* reguse tiedto:$0 */, [[COPY4]](tied-def 3), 2147614729 /* reguse tiedto:$2 */, [[COPY5]](tied-def 7), 2147549193 /* reguse tiedto:$1 */, [[COPY6]](tied-def 5)
+  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY [[COPY2]](s32)
+  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:vgpr_32 = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:vgpr_32 = COPY [[COPY1]](s32)
+  ; CHECK-NEXT:   INLINEASM &"; ", 1 /* sideeffect attdialect */, 1835018 /* regdef:VGPR_32 */, def %3, 1835018 /* regdef:VGPR_32 */, def %4, 1835018 /* regdef:VGPR_32 */, def %5, 2147483657 /* reguse tiedto:$0 */, [[COPY3]](tied-def 3), 2147614729 /* reguse tiedto:$2 */, [[COPY4]](tied-def 7), 2147549193 /* reguse tiedto:$1 */, [[COPY5]](tied-def 5)
+  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY %3
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY %4
   ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY %5
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(s32) = COPY %6
+  ; CHECK-NEXT:   G_STORE [[COPY6]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   G_STORE [[COPY7]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
   ; CHECK-NEXT:   G_STORE [[COPY8]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   G_STORE [[COPY9]](s32), [[DEF]](p1) :: (store (s32) into `i32 addrspace(1)* undef`, addrspace 1)
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY10]]
+  ; CHECK-NEXT:   SI_RETURN
   %asm = call {i32, i32, i32} asm sideeffect "; ", "=v,=v,=v,0,2,1"(i32 %c, i32 %a, i32 %b)
   %asmresult0 = extractvalue  {i32, i32, i32} %asm, 0
   store i32 %asmresult0, i32 addrspace(1)* undef
@@ -322,17 +282,13 @@ define void @test_many_matching_constraints(i32 %a, i32 %b, i32 %c) nounwind {
 define i32 @test_sgpr_to_vgpr_move_matching_constraint() nounwind {
   ; CHECK-LABEL: name: test_sgpr_to_vgpr_move_matching_constraint
   ; CHECK: bb.1.entry:
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %1
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY %1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY [[COPY1]](s32)
-  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, $1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %3, 2147483657 /* reguse tiedto:$0 */, [[COPY2]](tied-def 3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY %3
-  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %0
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
+  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   INLINEASM &"v_mov_b32 $0, $1", 0 /* attdialect */, 1835018 /* regdef:VGPR_32 */, def %2, 2147483657 /* reguse tiedto:$0 */, [[COPY1]](tied-def 3)
+  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY %2
+  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY2]](s32)
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %asm0 = tail call i32 asm "s_mov_b32 $0, 7", "=s"() nounwind
   %asm1 = tail call i32 asm "v_mov_b32 $0, $1", "=v,0"(i32 %asm0) nounwind
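The inline-asm updates show the knock-on effect on virtual-register numbering: with the COPY of $sgpr30_sgpr31 gone, a function whose only live-in was the return address loses its liveins block entirely, and the INLINEASM def operands start at %0 rather than %1. A hypothetical reduced form mirroring test_single_sgpr_output_s32 above:

define i32 @example_sgpr_output() {
  ; CHECK: bb.1 (%ir-block.0):
  ; CHECK-NEXT:   INLINEASM &"s_mov_b32 $0, 7", 0 /* attdialect */, 1966090 /* regdef:SReg_32 */, def %0
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY %0
  ; CHECK-NEXT:   $vgpr0 = COPY [[COPY]](s32)
  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
  %r = call i32 asm "s_mov_b32 $0, 7", "=s"()
  ret i32 %r
}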

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-memory-intrinsics.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-memory-intrinsics.ll
index ec903d3462880..8e90bd593a108 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-memory-intrinsics.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-memory-intrinsics.ll
@@ -6,18 +6,16 @@
 define void @test_memcpy_p1_p3_i64(i8 addrspace(1)* %dst, i8 addrspace(3)* %src) {
   ; CHECK-LABEL: name: test_memcpy_p1_p3_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p3) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
   ; CHECK-NEXT:   G_MEMCPY [[MV]](p1), [[COPY2]](p3), [[TRUNC]](s32), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memcpy.p1i8.p3i8.i64(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i64 256, i1 false)
   ret void
 }
@@ -25,17 +23,15 @@ define void @test_memcpy_p1_p3_i64(i8 addrspace(1)* %dst, i8 addrspace(3)* %src)
 define void @test_memcpy_p1_p3_i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src) {
   ; CHECK-LABEL: name: test_memcpy_p1_p3_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p3) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
   ; CHECK-NEXT:   G_MEMCPY [[MV]](p1), [[COPY2]](p3), [[C]](s32), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memcpy.p1i8.p3i8.i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i32 256, i1 false)
   ret void
 }
@@ -43,18 +39,16 @@ define void @test_memcpy_p1_p3_i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src)
 define void @test_memcpy_p1_p3_i16(i8 addrspace(1)* %dst, i8 addrspace(3)* %src) {
   ; CHECK-LABEL: name: test_memcpy_p1_p3_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p3) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 256
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
   ; CHECK-NEXT:   G_MEMCPY [[MV]](p1), [[COPY2]](p3), [[ZEXT]](s32), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memcpy.p1i8.p3i8.i16(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i16 256, i1 false)
   ret void
 }
@@ -62,18 +56,16 @@ define void @test_memcpy_p1_p3_i16(i8 addrspace(1)* %dst, i8 addrspace(3)* %src)
 define void @test_memcpy_p3_p1_i64(i8 addrspace(3)* %dst, i8 addrspace(1)* %src) {
   ; CHECK-LABEL: name: test_memcpy_p3_p1_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
   ; CHECK-NEXT:   G_MEMCPY [[COPY]](p3), [[MV]](p1), [[TRUNC]](s32), 0 :: (store (s8) into %ir.dst, addrspace 3), (load (s8) from %ir.src, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memcpy.p3i8.p1i8.i64(i8 addrspace(3)* %dst, i8 addrspace(1)* %src, i64 256, i1 false)
   ret void
 }
@@ -81,17 +73,15 @@ define void @test_memcpy_p3_p1_i64(i8 addrspace(3)* %dst, i8 addrspace(1)* %src)
 define void @test_memcpy_p3_p1_i32(i8 addrspace(3)* %dst, i8 addrspace(1)* %src) {
   ; CHECK-LABEL: name: test_memcpy_p3_p1_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
   ; CHECK-NEXT:   G_MEMCPY [[COPY]](p3), [[MV]](p1), [[C]](s32), 0 :: (store (s8) into %ir.dst, addrspace 3), (load (s8) from %ir.src, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memcpy.p3i8.p1i8.i32(i8 addrspace(3)* %dst, i8 addrspace(1)* %src, i32 256, i1 false)
   ret void
 }
@@ -99,18 +89,16 @@ define void @test_memcpy_p3_p1_i32(i8 addrspace(3)* %dst, i8 addrspace(1)* %src)
 define void @test_memcpy_p3_p1_i16(i8 addrspace(3)* %dst, i8 addrspace(1)* %src) {
   ; CHECK-LABEL: name: test_memcpy_p3_p1_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 256
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
   ; CHECK-NEXT:   G_MEMCPY [[COPY]](p3), [[MV]](p1), [[ZEXT]](s32), 0 :: (store (s8) into %ir.dst, addrspace 3), (load (s8) from %ir.src, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memcpy.p3i8.p1i8.i16(i8 addrspace(3)* %dst, i8 addrspace(1)* %src, i16 256, i1 false)
   ret void
 }
@@ -118,18 +106,16 @@ define void @test_memcpy_p3_p1_i16(i8 addrspace(3)* %dst, i8 addrspace(1)* %src)
 define void @test_memmove_p1_p3_i64(i8 addrspace(1)* %dst, i8 addrspace(3)* %src) {
   ; CHECK-LABEL: name: test_memmove_p1_p3_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p3) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
   ; CHECK-NEXT:   G_MEMMOVE [[MV]](p1), [[COPY2]](p3), [[TRUNC]](s32), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memmove.p1i8.p3i8.i64(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i64 256, i1 false)
   ret void
 }
@@ -137,17 +123,15 @@ define void @test_memmove_p1_p3_i64(i8 addrspace(1)* %dst, i8 addrspace(3)* %src
 define void @test_memmove_p1_p3_i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src) {
   ; CHECK-LABEL: name: test_memmove_p1_p3_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p3) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
   ; CHECK-NEXT:   G_MEMMOVE [[MV]](p1), [[COPY2]](p3), [[C]](s32), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memmove.p1i8.p3i8.i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i32 256, i1 false)
   ret void
 }
@@ -155,18 +139,16 @@ define void @test_memmove_p1_p3_i32(i8 addrspace(1)* %dst, i8 addrspace(3)* %src
 define void @test_memmove_p1_p3_i16(i8 addrspace(1)* %dst, i8 addrspace(3)* %src) {
   ; CHECK-LABEL: name: test_memmove_p1_p3_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(p3) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 256
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
   ; CHECK-NEXT:   G_MEMMOVE [[MV]](p1), [[COPY2]](p3), [[ZEXT]](s32), 0 :: (store (s8) into %ir.dst, addrspace 1), (load (s8) from %ir.src, addrspace 3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memmove.p1i8.p3i8.i16(i8 addrspace(1)* %dst, i8 addrspace(3)* %src, i16 256, i1 false)
   ret void
 }
@@ -174,18 +156,16 @@ define void @test_memmove_p1_p3_i16(i8 addrspace(1)* %dst, i8 addrspace(3)* %src
 define void @test_memset_p1_i64(i8 addrspace(1)* %dst, i8 %val) {
   ; CHECK-LABEL: name: test_memset_p1_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
   ; CHECK-NEXT:   G_MEMSET [[MV]](p1), [[TRUNC]](s8), [[C]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memset.p1i8.i64(i8 addrspace(1)* %dst, i8 %val, i64 256, i1 false)
   ret void
 }
@@ -193,19 +173,17 @@ define void @test_memset_p1_i64(i8 addrspace(1)* %dst, i8 %val) {
 define void @test_memset_p1_i32(i8 addrspace(1)* %dst, i8 %val) {
   ; CHECK-LABEL: name: test_memset_p1_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s32)
   ; CHECK-NEXT:   G_MEMSET [[MV]](p1), [[TRUNC]](s8), [[ZEXT]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memset.p1i8.i32(i8 addrspace(1)* %dst, i8 %val, i32 256, i1 false)
   ret void
 }
@@ -213,19 +191,17 @@ define void @test_memset_p1_i32(i8 addrspace(1)* %dst, i8 %val) {
 define void @test_memset_p1_i16(i8 addrspace(1)* %dst, i8 %val) {
   ; CHECK-LABEL: name: test_memset_p1_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 256
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[C]](s16)
   ; CHECK-NEXT:   G_MEMSET [[MV]](p1), [[TRUNC]](s8), [[ZEXT]](s64), 0 :: (store (s8) into %ir.dst, addrspace 1)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memset.p1i8.i16(i8 addrspace(1)* %dst, i8 %val, i16 256, i1 false)
   ret void
 }
@@ -233,17 +209,15 @@ define void @test_memset_p1_i16(i8 addrspace(1)* %dst, i8 %val) {
 define void @test_memset_p3_i64(i8 addrspace(3)* %dst, i8 %val) {
   ; CHECK-LABEL: name: test_memset_p3_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 256
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
   ; CHECK-NEXT:   G_MEMSET [[COPY]](p3), [[TRUNC]](s8), [[TRUNC1]](s32), 0 :: (store (s8) into %ir.dst, addrspace 3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memset.p3i8.i64(i8 addrspace(3)* %dst, i8 %val, i64 256, i1 false)
   ret void
 }
@@ -251,16 +225,14 @@ define void @test_memset_p3_i64(i8 addrspace(3)* %dst, i8 %val) {
 define void @test_memset_p3_i32(i8 addrspace(3)* %dst, i8 %val) {
   ; CHECK-LABEL: name: test_memset_p3_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 256
   ; CHECK-NEXT:   G_MEMSET [[COPY]](p3), [[TRUNC]](s8), [[C]](s32), 0 :: (store (s8) into %ir.dst, addrspace 3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memset.p3i8.i32(i8 addrspace(3)* %dst, i8 %val, i32 256, i1 false)
   ret void
 }
@@ -268,17 +240,15 @@ define void @test_memset_p3_i32(i8 addrspace(3)* %dst, i8 %val) {
 define void @test_memset_p3_i16(i8 addrspace(3)* %dst, i8 %val) {
   ; CHECK-LABEL: name: test_memset_p3_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 256
   ; CHECK-NEXT:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
   ; CHECK-NEXT:   G_MEMSET [[COPY]](p3), [[TRUNC]](s8), [[ZEXT]](s32), 0 :: (store (s8) into %ir.dst, addrspace 3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
+  ; CHECK-NEXT:   SI_RETURN
   call void @llvm.memset.p3i8.i16(i8 addrspace(3)* %dst, i8 %val, i16 256, i1 false)
   ret void
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-metadata.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-metadata.ll
index 99a2875e8b160..a1a72c07cb631 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-metadata.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-metadata.ll
@@ -5,15 +5,12 @@
 define i32 @reloc_constant() {
   ; CHECK-LABEL: name: reloc_constant
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK:   liveins: $sgpr30_sgpr31
-  ; CHECK:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK:   [[INT0:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.reloc.constant), !0
   ; We cannot have any specific metadata check here as ConstantAsMetadata is printed as <raw_ptr_val>
   ; CHECK:   [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.reloc.constant), <0x{{[0-9a-f]+}}>
   ; CHECK:   [[SUM:%[0-9]+]]:_(s32) = G_ADD [[INT0]], [[INT1]]
   ; CHECK:   $vgpr0 = COPY [[SUM]](s32)
-  ; CHECK:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0
+  ; CHECK:   SI_RETURN implicit $vgpr0
   %val0 = call i32 @llvm.amdgcn.reloc.constant(metadata !0)
   %val1 = call i32 @llvm.amdgcn.reloc.constant(metadata i32 4)
   %res = add i32 %val0, %val1

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
index 88eab83433a8e..dab8b64e75f45 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-non-integral-address-spaces.ll
@@ -5,17 +5,13 @@
 define i8 addrspace(7)* @no_auto_constfold_gep() {
   ; CHECK-LABEL: name: no_auto_constfold_gep
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p7) = G_CONSTANT i64 0
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 123
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(p7) = G_PTR_ADD [[C]], [[C1]](s64)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTR_ADD]](p7)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY1]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %gep = getelementptr i8, i8 addrspace(7)* null, i64 123
   ret i8 addrspace(7)* %gep
 }
@@ -24,22 +20,18 @@ define i8 addrspace(7)* @no_auto_constfold_gep() {
 define <2 x i8 addrspace(7)*> @no_auto_constfold_gep_vector() {
   ; CHECK-LABEL: name: no_auto_constfold_gep_vector
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[C:%[0-9]+]]:_(p7) = G_CONSTANT i64 0
   ; CHECK-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x p7>) = G_BUILD_VECTOR [[C]](p7), [[C]](p7)
   ; CHECK-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 123
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
   ; CHECK-NEXT:   [[PTR_ADD:%[0-9]+]]:_(<2 x p7>) = G_PTR_ADD [[BUILD_VECTOR]], [[BUILD_VECTOR1]](<2 x s64>)
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(<2 x p7>) = COPY [[PTR_ADD]](<2 x p7>)
-  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x p7>)
+  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(<2 x p7>) = COPY [[PTR_ADD]](<2 x p7>)
+  ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x p7>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; CHECK-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; CHECK-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY2]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
   %gep = getelementptr i8, <2 x i8 addrspace(7)*> zeroinitializer, <2 x i64> <i64 123, i64 123>
   ret <2 x i8 addrspace(7)*> %gep
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-ptrmask.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-ptrmask.ll
index a88c4224a2fa7..af0daceac6c61 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-ptrmask.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-ptrmask.ll
@@ -4,7 +4,7 @@
 define i8* @ptrmask_flat_i64(i8* %ptr, i64 %mask) {
   ; CHECK-LABEL: name: ptrmask_flat_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -12,13 +12,11 @@ define i8* @ptrmask_flat_i64(i8* %ptr, i64 %mask) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p0) = G_PTRMASK [[MV]], [[MV1]](s64)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTRMASK]](p0)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %masked = call i8* @llvm.ptrmask.p0i8.i64(i8* %ptr, i64 %mask)
   ret i8* %masked
 }
@@ -26,19 +24,17 @@ define i8* @ptrmask_flat_i64(i8* %ptr, i64 %mask) {
 define i8* @ptrmask_flat_i32(i8* %ptr, i32 %mask) {
   ; CHECK-LABEL: name: ptrmask_flat_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p0) = G_PTRMASK [[MV]], [[COPY2]](s32)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTRMASK]](p0)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %masked = call i8* @llvm.ptrmask.p0i8.i32(i8* %ptr, i32 %mask)
   ret i8* %masked
 }
@@ -46,20 +42,18 @@ define i8* @ptrmask_flat_i32(i8* %ptr, i32 %mask) {
 define i8* @ptrmask_flat_i16(i8* %ptr, i16 %mask) {
   ; CHECK-LABEL: name: ptrmask_flat_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p0) = G_PTRMASK [[MV]], [[TRUNC]](s16)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTRMASK]](p0)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %masked = call i8* @llvm.ptrmask.p0i8.i16(i8* %ptr, i16 %mask)
   ret i8* %masked
 }
@@ -67,20 +61,18 @@ define i8* @ptrmask_flat_i16(i8* %ptr, i16 %mask) {
 define i8* @ptrmask_flat_i1(i8* %ptr, i1 %mask) {
   ; CHECK-LABEL: name: ptrmask_flat_i1
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p0) = G_PTRMASK [[MV]], [[TRUNC]](s1)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[PTRMASK]](p0)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %masked = call i8* @llvm.ptrmask.p0i8.i1(i8* %ptr, i1 %mask)
   ret i8* %masked
 }
@@ -88,17 +80,15 @@ define i8* @ptrmask_flat_i1(i8* %ptr, i1 %mask) {
 define i8 addrspace(3)* @ptrmask_local_i64(i8 addrspace(3)* %ptr, i64 %mask) {
   ; CHECK-LABEL: name: ptrmask_local_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p3) = G_PTRMASK [[COPY]], [[MV]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[PTRMASK]](p3)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %masked = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i64(i8 addrspace(3)* %ptr, i64 %mask)
   ret i8 addrspace(3)* %masked
 }
@@ -106,15 +96,13 @@ define i8 addrspace(3)* @ptrmask_local_i64(i8 addrspace(3)* %ptr, i64 %mask) {
 define i8 addrspace(3)* @ptrmask_local_i32(i8 addrspace(3)* %ptr, i32 %mask) {
   ; CHECK-LABEL: name: ptrmask_local_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p3) = G_PTRMASK [[COPY]], [[COPY1]](s32)
   ; CHECK-NEXT:   $vgpr0 = COPY [[PTRMASK]](p3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %masked = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i32(i8 addrspace(3)* %ptr, i32 %mask)
   ret i8 addrspace(3)* %masked
 }
@@ -122,16 +110,14 @@ define i8 addrspace(3)* @ptrmask_local_i32(i8 addrspace(3)* %ptr, i32 %mask) {
 define i8 addrspace(3)* @ptrmask_local_i16(i8 addrspace(3)* %ptr, i16 %mask) {
   ; CHECK-LABEL: name: ptrmask_local_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p3) = G_PTRMASK [[COPY]], [[TRUNC]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[PTRMASK]](p3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %masked = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i16(i8 addrspace(3)* %ptr, i16 %mask)
   ret i8 addrspace(3)* %masked
 }
@@ -139,16 +125,14 @@ define i8 addrspace(3)* @ptrmask_local_i16(i8 addrspace(3)* %ptr, i16 %mask) {
 define i8 addrspace(3)* @ptrmask_local_i1(i8 addrspace(3)* %ptr, i1 %mask) {
   ; CHECK-LABEL: name: ptrmask_local_i1
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(p3) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[PTRMASK:%[0-9]+]]:_(p3) = G_PTRMASK [[COPY]], [[TRUNC]](s1)
   ; CHECK-NEXT:   $vgpr0 = COPY [[PTRMASK]](p3)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %masked = call i8 addrspace(3)* @llvm.ptrmask.p3i8.i1(i8 addrspace(3)* %ptr, i1 %mask)
   ret i8 addrspace(3)* %masked
 }

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sat.ll
index 0db076c6b0fcf..4faea44d1582e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sat.ll
@@ -4,18 +4,16 @@
 define i16 @uaddsat_i16(i16 %lhs, i16 %rhs) {
   ; CHECK-LABEL: name: uaddsat_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[TRUNC]], [[TRUNC1]]
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDSAT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i16 @llvm.uadd.sat.i16(i16 %lhs, i16 %rhs)
   ret i16 %res
 }
@@ -24,15 +22,13 @@ declare i16 @llvm.uadd.sat.i16(i16, i16)
 define i32 @uaddsat_i32(i32 %lhs, i32 %rhs) {
   ; CHECK-LABEL: name: uaddsat_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[UADDSAT:%[0-9]+]]:_(s32) = G_UADDSAT [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[UADDSAT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i32 @llvm.uadd.sat.i32(i32 %lhs, i32 %rhs)
   ret i32 %res
 }
@@ -41,7 +37,7 @@ declare i32 @llvm.uadd.sat.i32(i32, i32)
 define i64 @uaddsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-LABEL: name: uaddsat_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -49,13 +45,11 @@ define i64 @uaddsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[UADDSAT:%[0-9]+]]:_(s64) = G_UADDSAT [[MV]], [[MV1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UADDSAT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %res
 }
@@ -64,7 +58,7 @@ declare i64 @llvm.uadd.sat.i64(i64, i64)
 define <2 x i32> @uaddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-LABEL: name: uaddsat_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -72,13 +66,11 @@ define <2 x i32> @uaddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[UADDSAT:%[0-9]+]]:_(<2 x s32>) = G_UADDSAT [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UADDSAT]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
   ret <2 x i32> %res
 }
@@ -87,18 +79,16 @@ declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
 define i16 @saddsat_i16(i16 %lhs, i16 %rhs) {
   ; CHECK-LABEL: name: saddsat_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[TRUNC]], [[TRUNC1]]
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SADDSAT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i16 @llvm.sadd.sat.i16(i16 %lhs, i16 %rhs)
   ret i16 %res
 }
@@ -107,15 +97,13 @@ declare i16 @llvm.sadd.sat.i16(i16, i16)
 define i32 @saddsat_i32(i32 %lhs, i32 %rhs) {
   ; CHECK-LABEL: name: saddsat_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SADDSAT:%[0-9]+]]:_(s32) = G_SADDSAT [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[SADDSAT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i32 @llvm.sadd.sat.i32(i32 %lhs, i32 %rhs)
   ret i32 %res
 }
@@ -124,7 +112,7 @@ declare i32 @llvm.sadd.sat.i32(i32, i32)
 define i64 @saddsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-LABEL: name: saddsat_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -132,13 +120,11 @@ define i64 @saddsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SADDSAT:%[0-9]+]]:_(s64) = G_SADDSAT [[MV]], [[MV1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SADDSAT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %res
 }
@@ -147,7 +133,7 @@ declare i64 @llvm.sadd.sat.i64(i64, i64)
 define <2 x i32> @saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-LABEL: name: saddsat_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -155,13 +141,11 @@ define <2 x i32> @saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SADDSAT:%[0-9]+]]:_(<2 x s32>) = G_SADDSAT [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SADDSAT]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
   ret <2 x i32> %res
 }
@@ -170,18 +154,16 @@ declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>)
 define i16 @usubsat_i16(i16 %lhs, i16 %rhs) {
   ; CHECK-LABEL: name: usubsat_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[TRUNC]], [[TRUNC1]]
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[USUBSAT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i16 @llvm.usub.sat.i16(i16 %lhs, i16 %rhs)
   ret i16 %res
 }
@@ -190,15 +172,13 @@ declare i16 @llvm.usub.sat.i16(i16, i16)
 define i32 @usubsat_i32(i32 %lhs, i32 %rhs) {
   ; CHECK-LABEL: name: usubsat_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USUBSAT:%[0-9]+]]:_(s32) = G_USUBSAT [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[USUBSAT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i32 @llvm.usub.sat.i32(i32 %lhs, i32 %rhs)
   ret i32 %res
 }
@@ -207,7 +187,7 @@ declare i32 @llvm.usub.sat.i32(i32, i32)
 define i64 @usubsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-LABEL: name: usubsat_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -215,13 +195,11 @@ define i64 @usubsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USUBSAT:%[0-9]+]]:_(s64) = G_USUBSAT [[MV]], [[MV1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[USUBSAT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %res
 }
@@ -230,7 +208,7 @@ declare i64 @llvm.usub.sat.i64(i64, i64)
 define <2 x i32> @usubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-LABEL: name: usubsat_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -238,13 +216,11 @@ define <2 x i32> @usubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USUBSAT:%[0-9]+]]:_(<2 x s32>) = G_USUBSAT [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[USUBSAT]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
   ret <2 x i32> %res
 }
@@ -253,18 +229,16 @@ declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>)
 define i16 @ssubsat_i16(i16 %lhs, i16 %rhs) {
   ; CHECK-LABEL: name: ssubsat_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[TRUNC]], [[TRUNC1]]
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SSUBSAT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i16 @llvm.ssub.sat.i16(i16 %lhs, i16 %rhs)
   ret i16 %res
 }
@@ -273,15 +247,13 @@ declare i16 @llvm.ssub.sat.i16(i16, i16)
 define i32 @ssubsat_i32(i32 %lhs, i32 %rhs) {
   ; CHECK-LABEL: name: ssubsat_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSUBSAT:%[0-9]+]]:_(s32) = G_SSUBSAT [[COPY]], [[COPY1]]
   ; CHECK-NEXT:   $vgpr0 = COPY [[SSUBSAT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i32 @llvm.ssub.sat.i32(i32 %lhs, i32 %rhs)
   ret i32 %res
 }
@@ -290,7 +262,7 @@ declare i32 @llvm.ssub.sat.i32(i32, i32)
 define i64 @ssubsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-LABEL: name: ssubsat_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -298,13 +270,11 @@ define i64 @ssubsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSUBSAT:%[0-9]+]]:_(s64) = G_SSUBSAT [[MV]], [[MV1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SSUBSAT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %res
 }
@@ -313,7 +283,7 @@ declare i64 @llvm.ssub.sat.i64(i64, i64)
 define <2 x i32> @ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-LABEL: name: ssubsat_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -321,13 +291,11 @@ define <2 x i32> @ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSUBSAT:%[0-9]+]]:_(<2 x s32>) = G_SSUBSAT [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SSUBSAT]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
   ret <2 x i32> %res
 }
@@ -336,18 +304,16 @@ declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>)
 define i16 @ushlsat_i16(i16 %lhs, i16 %rhs) {
   ; CHECK-LABEL: name: ushlsat_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USHLSAT:%[0-9]+]]:_(s16) = G_USHLSAT [[TRUNC]], [[TRUNC1]](s16)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[USHLSAT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i16 @llvm.ushl.sat.i16(i16 %lhs, i16 %rhs)
   ret i16 %res
 }
@@ -356,15 +322,13 @@ declare i16 @llvm.ushl.sat.i16(i16, i16)
 define i32 @ushlsat_i32(i32 %lhs, i32 %rhs) {
   ; CHECK-LABEL: name: ushlsat_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USHLSAT:%[0-9]+]]:_(s32) = G_USHLSAT [[COPY]], [[COPY1]](s32)
   ; CHECK-NEXT:   $vgpr0 = COPY [[USHLSAT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i32 @llvm.ushl.sat.i32(i32 %lhs, i32 %rhs)
   ret i32 %res
 }
@@ -373,7 +337,7 @@ declare i32 @llvm.ushl.sat.i32(i32, i32)
 define i64 @ushlsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-LABEL: name: ushlsat_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -381,13 +345,11 @@ define i64 @ushlsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USHLSAT:%[0-9]+]]:_(s64) = G_USHLSAT [[MV]], [[MV1]](s64)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[USHLSAT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call i64 @llvm.ushl.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %res
 }
@@ -396,7 +358,7 @@ declare i64 @llvm.ushl.sat.i64(i64, i64)
 define <2 x i32> @ushlsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-LABEL: name: ushlsat_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -404,13 +366,11 @@ define <2 x i32> @ushlsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[USHLSAT:%[0-9]+]]:_(<2 x s32>) = G_USHLSAT [[BUILD_VECTOR]], [[BUILD_VECTOR1]](<2 x s32>)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[USHLSAT]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
   ret <2 x i32> %res
 }
@@ -419,18 +379,16 @@ declare <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32>, <2 x i32>)
 define i16 @sshlsat_i16(i16 %lhs, i16 %rhs) {
   ; CHECK-LABEL: name: sshlsat_i16
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; CHECK-NEXT:   [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSHLSAT:%[0-9]+]]:_(s16) = G_SSHLSAT [[TRUNC]], [[TRUNC1]](s16)
   ; CHECK-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SSHLSAT]](s16)
   ; CHECK-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i16 @llvm.sshl.sat.i16(i16 %lhs, i16 %rhs)
   ret i16 %res
 }
@@ -439,15 +397,13 @@ declare i16 @llvm.sshl.sat.i16(i16, i16)
 define i32 @sshlsat_i32(i32 %lhs, i32 %rhs) {
   ; CHECK-LABEL: name: sshlsat_i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSHLSAT:%[0-9]+]]:_(s32) = G_SSHLSAT [[COPY]], [[COPY1]](s32)
   ; CHECK-NEXT:   $vgpr0 = COPY [[SSHLSAT]](s32)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0
   %res = call i32 @llvm.sshl.sat.i32(i32 %lhs, i32 %rhs)
   ret i32 %res
 }
@@ -456,7 +412,7 @@ declare i32 @llvm.sshl.sat.i32(i32, i32)
 define i64 @sshlsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-LABEL: name: sshlsat_i64
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -464,13 +420,11 @@ define i64 @sshlsat_i64(i64 %lhs, i64 %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSHLSAT:%[0-9]+]]:_(s64) = G_SSHLSAT [[MV]], [[MV1]](s64)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SSHLSAT]](s64)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call i64 @llvm.sshl.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %res
 }
@@ -479,7 +433,7 @@ declare i64 @llvm.sshl.sat.i64(i64, i64)
 define <2 x i32> @sshlsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-LABEL: name: sshlsat_v2i32
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -487,13 +441,11 @@ define <2 x i32> @sshlsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
   ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
   ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
   ; CHECK-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[SSHLSAT:%[0-9]+]]:_(<2 x s32>) = G_SSHLSAT [[BUILD_VECTOR]], [[BUILD_VECTOR1]](<2 x s32>)
   ; CHECK-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[SSHLSAT]](<2 x s32>)
   ; CHECK-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; CHECK-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+  ; CHECK-NEXT:   SI_RETURN implicit $vgpr0, implicit $vgpr1
   %res = call <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
   ret <2 x i32> %res
 }

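[The hunks above and below all follow one mechanical pattern: the copy of $sgpr30_sgpr31 into a virtual register disappears from the entry block, and the S_SETPC_B64_return on a ccr_sgpr_64 copy collapses into the operand-less SI_RETURN pseudo, which keeps only the implicit return-value uses. For orientation, here is a minimal C++ sketch of how such a pseudo can be expanded back into S_SETPC_B64_return inside SIInstrInfo::expandPostRAPseudo(); it is a sketch rather than the verbatim commit code, assuming the usual MBB/MI/DL/ST names that are in scope in that function, the existing SIRegisterInfo::getReturnAddressReg() and MachineInstrBuilder::copyImplicitOps() helpers, and a RegState::Undef flag to satisfy verifier liveness now that the register is restored by CSR save/restore instead of being tracked as a live-in.

  // Sketch: expanding the operand-less SI_RETURN pseudo after register
  // allocation. By this point the frame lowering's CSR restore has already
  // put the return address back into the callee-saved pair s[30:31].
  case AMDGPU::SI_RETURN: {
    const MachineFunction *MF = MBB.getParent();
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    // Mark the register undef: SI_RETURN never formally used it, so there is
    // no live range for the verifier to connect; correctness comes from the
    // CSR save/restore, not from liveness tracking.
    MachineInstrBuilder MIB =
        BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return))
            .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef);
    // Keep the implicit uses ($vgpr0, $vgpr1, ...) that the CHECK lines
    // attach to SI_RETURN, so the return values stay live up to the branch.
    MIB.copyImplicitOps(MI);
    MI.eraseFromParent();
    break;
  }

This is also why the virtual-register numbering shifts in the remaining hunks: with one fewer COPY at entry, every later [[COPYn]] in the generated checks is renumbered down by one.]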
diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll
index 2616f89f764ea..b65d5842e4246 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-sibling-call.ll
@@ -5,15 +5,13 @@
 define fastcc i32 @i32_fastcc_i32_i32(i32 %arg0, i32 %arg1) #1 {
   ; GCN-LABEL: name: i32_fastcc_i32_i32
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
   ; GCN-NEXT:   $vgpr0 = COPY [[ADD]](s32)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
   %add0 = add i32 %arg0, %arg1
   ret i32 %add0
 }
@@ -21,11 +19,10 @@ define fastcc i32 @i32_fastcc_i32_i32(i32 %arg0, i32 %arg1) #1 {
 define fastcc i32 @i32_fastcc_i32_i32_stack_object(i32 %arg0, i32 %arg1) #1 {
   ; GCN-LABEL: name: i32_fastcc_i32_i32_stack_object
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
@@ -33,8 +30,7 @@ define fastcc i32 @i32_fastcc_i32_i32_stack_object(i32 %arg0, i32 %arg1) #1 {
   ; GCN-NEXT:   G_STORE [[C]](s32), [[PTR_ADD]](p5) :: (volatile store (s32) into %ir.gep, addrspace 5)
   ; GCN-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
   ; GCN-NEXT:   $vgpr0 = COPY [[ADD]](s32)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
   %alloca = alloca [16 x i32], align 4, addrspace(5)
   %gep = getelementptr inbounds [16 x i32], [16 x i32] addrspace(5)* %alloca, i32 0, i32 5
   store volatile i32 9, i32 addrspace(5)* %gep
@@ -45,17 +41,16 @@ define fastcc i32 @i32_fastcc_i32_i32_stack_object(i32 %arg0, i32 %arg1) #1 {
 define hidden fastcc i32 @sibling_call_i32_fastcc_i32_i32(i32 %a, i32 %b, i32 %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[COPY1]](s32)
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY4]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %ret = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
@@ -65,12 +60,11 @@ entry:
 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_stack_object(i32 %a, i32 %b, i32 %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_stack_object
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
@@ -79,8 +73,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_stack_object(i32 %a, i32 %b,
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[COPY1]](s32)
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY4]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %alloca = alloca [16 x i32], align 4, addrspace(5)
@@ -93,12 +87,11 @@ entry:
 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_callee_stack_object(i32 %a, i32 %b, i32 %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_callee_stack_object
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
@@ -107,8 +100,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_callee_stack_object(i32 %a, i
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_stack_object
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[COPY1]](s32)
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY4]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_stack_object, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %alloca = alloca [16 x i32], align 4, addrspace(5)
@@ -121,17 +114,16 @@ entry:
 define fastcc void @sibling_call_i32_fastcc_i32_i32_unused_result(i32 %a, i32 %b, i32 %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_unused_result
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[COPY1]](s32)
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY4]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %ret = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
@@ -171,17 +163,15 @@ entry:
 define hidden fastcc i32 @i32_fastcc_i32_byval_i32(i32 %arg0, i32 addrspace(5)* byval(i32) align 4 %arg1) #1 {
   ; GCN-LABEL: name: i32_fastcc_i32_byval_i32
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p5) :: (dereferenceable load (s32) from %ir.arg1, addrspace 5)
   ; GCN-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[LOAD]]
   ; GCN-NEXT:   $vgpr0 = COPY [[ADD]](s32)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
   %arg1.load = load i32, i32 addrspace(5)* %arg1, align 4
   %add0 = add i32 %arg0, %arg1.load
   ret i32 %add0
@@ -191,13 +181,12 @@ define hidden fastcc i32 @i32_fastcc_i32_byval_i32(i32 %arg0, i32 addrspace(5)*
 define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32_byval_parent(i32 %a, i32 addrspace(5)* byval(i32) %b.byval, i32 %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_byval_i32_byval_parent
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(p5) = COPY [[FRAME_INDEX]](p5)
   ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_byval_i32
   ; GCN-NEXT:   [[AMDGPU_WAVE_ADDRESS:%[0-9]+]]:_(p5) = G_AMDGPU_WAVE_ADDRESS $sgpr32
@@ -206,14 +195,13 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32_byval_parent(i32 %a, i3
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; GCN-NEXT:   G_MEMCPY [[PTR_ADD]](p5), [[COPY1]](p5), [[C1]](s32), 0 :: (dereferenceable store (s32) into stack, addrspace 5), (dereferenceable load (s32) from %ir.b.byval, addrspace 5)
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY4]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @i32_fastcc_i32_byval_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
-  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 4, implicit-def $scc
-  ; GCN-NEXT:   $vgpr0 = COPY [[COPY5]](s32)
-  ; GCN-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY6]], implicit $vgpr0
+  ; GCN-NEXT:   $vgpr0 = COPY [[COPY4]](s32)
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, i32 addrspace(5)* byval(i32) %b.byval)
   ret i32 %ret
@@ -225,7 +213,7 @@ entry:
 define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32(i32 %a, [32 x i32] %large) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_byval_i32
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -262,7 +250,6 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32(i32 %a, [32 x i32] %lar
   ; GCN-NEXT:   [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p5) :: (invariant load (s32) from %fixed-stack.2, align 16, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
   ; GCN-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.1, addrspace 5)
-  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
   ; GCN-NEXT:   [[INTTOPTR:%[0-9]+]]:_(p5) = G_INTTOPTR [[C]](s32)
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_byval_i32
@@ -270,8 +257,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_byval_i32(i32 %a, [32 x i32] %lar
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
   ; GCN-NEXT:   G_MEMCPY [[FRAME_INDEX2]](p5), [[INTTOPTR]](p5), [[C1]](s32), 0 :: (dereferenceable store (s32) into %fixed-stack.0, align 16, addrspace 5), (dereferenceable load (s32) from `i32 addrspace(5)* inttoptr (i32 16 to i32 addrspace(5)*)`, align 16, addrspace 5)
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
-  ; GCN-NEXT:   [[COPY32:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY32]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_byval_i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %ret = tail call fastcc i32 @i32_fastcc_i32_byval_i32(i32 %a, i32 addrspace(5)* byval(i32) inttoptr (i32 16 to i32 addrspace(5)*))
@@ -281,7 +268,7 @@ entry:
 define fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %arg0, i32 %arg1, [32 x i32] %large) #1 {
   ; GCN-LABEL: name: i32_fastcc_i32_i32_a32i32
   ; GCN: bb.1 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -320,13 +307,11 @@ define fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %arg0, i32 %arg1, [32 x i32] %l
   ; GCN-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.1, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.0
   ; GCN-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.0, align 8, addrspace 5)
-  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
   ; GCN-NEXT:   [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[LOAD1]]
   ; GCN-NEXT:   [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[ADD1]], [[LOAD2]]
   ; GCN-NEXT:   $vgpr0 = COPY [[ADD2]](s32)
-  ; GCN-NEXT:   [[COPY32:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY31]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY32]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
   %val_firststack = extractvalue [32 x i32] %large, 30
   %val_laststack = extractvalue [32 x i32] %large, 31
   %add0 = add i32 %arg0, %arg1
@@ -338,7 +323,7 @@ define fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %arg0, i32 %arg1, [32 x i32] %l
 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_a32i32
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -377,7 +362,6 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x
   ; GCN-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
   ; GCN-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
-  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
   ; GCN-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
   ; GCN-NEXT:   G_STORE [[LOAD]](s32), [[FRAME_INDEX3]](p5) :: (store (s32) into %fixed-stack.2, align 16, addrspace 5)
@@ -416,8 +400,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x
   ; GCN-NEXT:   $vgpr28 = COPY [[COPY28]](s32)
   ; GCN-NEXT:   $vgpr29 = COPY [[COPY29]](s32)
   ; GCN-NEXT:   $vgpr30 = COPY [[COPY30]](s32)
-  ; GCN-NEXT:   [[COPY32:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY32]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %ret = tail call fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c)
@@ -427,7 +411,7 @@ entry:
 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32_stack_object(i32 %a, i32 %b, [32 x i32] %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_a32i32_stack_object
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -466,7 +450,6 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32_stack_object(i32 %a, i
   ; GCN-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
   ; GCN-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
-  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
@@ -510,8 +493,8 @@ define fastcc i32 @sibling_call_i32_fastcc_i32_i32_a32i32_stack_object(i32 %a, i
   ; GCN-NEXT:   $vgpr28 = COPY [[COPY28]](s32)
   ; GCN-NEXT:   $vgpr29 = COPY [[COPY29]](s32)
   ; GCN-NEXT:   $vgpr30 = COPY [[COPY30]](s32)
-  ; GCN-NEXT:   [[COPY32:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY32]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %alloca = alloca [16 x i32], align 4, addrspace(5)
@@ -527,11 +510,10 @@ entry:
 define fastcc i32 @no_sibling_call_callee_more_stack_space(i32 %a, i32 %b) #1 {
   ; GCN-LABEL: name: no_sibling_call_callee_more_stack_space
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32_a32i32
@@ -576,14 +558,13 @@ define fastcc i32 @no_sibling_call_callee_more_stack_space(i32 %a, i32 %b) #1 {
   ; GCN-NEXT:   $vgpr28 = COPY [[C]](s32)
   ; GCN-NEXT:   $vgpr29 = COPY [[C]](s32)
   ; GCN-NEXT:   $vgpr30 = COPY [[C]](s32)
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @i32_fastcc_i32_i32_a32i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 12, implicit-def $scc
-  ; GCN-NEXT:   $vgpr0 = COPY [[COPY4]](s32)
-  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY5]], implicit $vgpr0
+  ; GCN-NEXT:   $vgpr0 = COPY [[COPY3]](s32)
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %ret = tail call fastcc i32 @i32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] zeroinitializer)
   ret i32 %ret
@@ -593,27 +574,26 @@ entry:
 define fastcc i32 @sibling_call_i32_fastcc_i32_i32_other_call(i32 %a, i32 %b, i32 %c) #1 {
   ; GCN-LABEL: name: sibling_call_i32_fastcc_i32_i32_other_call
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @i32_fastcc_i32_i32
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[COPY1]](s32)
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY4]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY3]](<4 x s32>)
   ; GCN-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @i32_fastcc_i32_i32, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $vgpr0
-  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr0
+  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
   ; GCN-NEXT:   [[GV1:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @sibling_call_i32_fastcc_i32_i32
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[COPY1]](s32)
-  ; GCN-NEXT:   $vgpr2 = COPY [[COPY5]](s32)
-  ; GCN-NEXT:   [[COPY6:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY6]](<4 x s32>)
+  ; GCN-NEXT:   $vgpr2 = COPY [[COPY4]](s32)
+  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY5]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV1]](p0), @sibling_call_i32_fastcc_i32_i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %other.call = tail call fastcc i32 @i32_fastcc_i32_i32(i32 %a, i32 %b)
@@ -626,7 +606,7 @@ entry:
 define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32(i32 %a, i32 %b, [32 x i32] %c) #1 {
   ; GCN-LABEL: name: sibling_call_stack_objecti32_fastcc_i32_i32_a32i32
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -665,7 +645,6 @@ define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32(i32 %a, i3
   ; GCN-NEXT:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
   ; GCN-NEXT:   [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
-  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
@@ -709,8 +688,8 @@ define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32(i32 %a, i3
   ; GCN-NEXT:   $vgpr28 = COPY [[COPY28]](s32)
   ; GCN-NEXT:   $vgpr29 = COPY [[COPY29]](s32)
   ; GCN-NEXT:   $vgpr30 = COPY [[COPY30]](s32)
-  ; GCN-NEXT:   [[COPY32:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY32]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %alloca = alloca [16 x i32], align 4, addrspace(5)
@@ -723,7 +702,7 @@ entry:
 define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32_larger_arg_area(i32 %a, i32 %b, [36 x i32] %c) #1 {
   ; GCN-LABEL: name: sibling_call_stack_objecti32_fastcc_i32_i32_a32i32_larger_arg_area
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
@@ -770,7 +749,6 @@ define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32_larger_arg
   ; GCN-NEXT:   [[LOAD5:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX5]](p5) :: (invariant load (s32) from %fixed-stack.4, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
   ; GCN-NEXT:   [[LOAD6:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX6]](p5) :: (invariant load (s32) from %fixed-stack.3, align 8, addrspace 5)
-  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GCN-NEXT:   [[FRAME_INDEX7:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
@@ -815,8 +793,8 @@ define fastcc i32 @sibling_call_stack_objecti32_fastcc_i32_i32_a32i32_larger_arg
   ; GCN-NEXT:   $vgpr28 = COPY [[C1]](s32)
   ; GCN-NEXT:   $vgpr29 = COPY [[C1]](s32)
   ; GCN-NEXT:   $vgpr30 = COPY [[C1]](s32)
-  ; GCN-NEXT:   [[COPY32:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY32]](<4 x s32>)
+  ; GCN-NEXT:   [[COPY31:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY31]](<4 x s32>)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i32_fastcc_i32_i32_a32i32, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3
 entry:
   %alloca = alloca [16 x i32], align 4, addrspace(5)
@@ -831,7 +809,7 @@ declare hidden void @void_fastcc_multi_byval(i32 %a, [3 x i32] addrspace(5)* byv
 define fastcc void @sibling_call_fastcc_multi_byval(i32 %a, [64 x i32]) #1 {
   ; GCN-LABEL: name: sibling_call_fastcc_multi_byval
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -940,7 +918,6 @@ define fastcc void @sibling_call_fastcc_multi_byval(i32 %a, [64 x i32]) #1 {
   ; GCN-NEXT:   [[LOAD32:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX32]](p5) :: (invariant load (s32) from %fixed-stack.3, align 16, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX33:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
   ; GCN-NEXT:   [[LOAD33:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX33]](p5) :: (invariant load (s32) from %fixed-stack.2, addrspace 5)
-  ; GCN-NEXT:   [[COPY39:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
   ; GCN-NEXT:   [[FRAME_INDEX34:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca0
@@ -956,14 +933,14 @@ define fastcc void @sibling_call_fastcc_multi_byval(i32 %a, [64 x i32]) #1 {
   ; GCN-NEXT:   [[PTR_ADD2:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX35]], [[C3]](s32)
   ; GCN-NEXT:   G_STORE [[C1]](s64), [[PTR_ADD2]](p5) :: (store (s64) into %ir.alloca1 + 8, addrspace 5)
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @void_fastcc_multi_byval
-  ; GCN-NEXT:   [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY41:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY42:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY43:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY44:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY45:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY46:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY47:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY39:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY41:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY42:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY43:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY44:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY45:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY46:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[FRAME_INDEX36:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.1
   ; GCN-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; GCN-NEXT:   G_MEMCPY [[FRAME_INDEX36]](p5), [[FRAME_INDEX34]](p5), [[C4]](s32), 0 :: (dereferenceable store (s96) into %fixed-stack.1, align 16, addrspace 5), (dereferenceable load (s96) from %ir.alloca0, align 16, addrspace 5)
@@ -971,16 +948,16 @@ define fastcc void @sibling_call_fastcc_multi_byval(i32 %a, [64 x i32]) #1 {
   ; GCN-NEXT:   [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
   ; GCN-NEXT:   G_MEMCPY [[FRAME_INDEX37]](p5), [[FRAME_INDEX35]](p5), [[C5]](s32), 0 :: (dereferenceable store (s128) into %fixed-stack.0, addrspace 5), (dereferenceable load (s128) from %ir.alloca1, align 8, addrspace 5)
   ; GCN-NEXT:   $vgpr0 = COPY [[COPY8]](s32)
-  ; GCN-NEXT:   [[COPY48:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY48]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY40]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY41]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY42]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY43]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY44]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY45]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY46]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY47]](s32)
+  ; GCN-NEXT:   [[COPY47:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY47]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY39]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY40]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY41]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY42]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY43]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY44]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY45]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY46]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @void_fastcc_multi_byval, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %alloca0 = alloca [3 x i32], align 16, addrspace(5)
@@ -997,7 +974,7 @@ declare hidden void @void_fastcc_byval_and_stack_passed([3 x i32] addrspace(5)*
 define fastcc void @sibling_call_byval_and_stack_passed(i32 %stack.out.arg, [64 x i32]) #1 {
   ; GCN-LABEL: name: sibling_call_byval_and_stack_passed
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $vgpr17, $vgpr18, $vgpr19, $vgpr20, $vgpr21, $vgpr22, $vgpr23, $vgpr24, $vgpr25, $vgpr26, $vgpr27, $vgpr28, $vgpr29, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1106,7 +1083,6 @@ define fastcc void @sibling_call_byval_and_stack_passed(i32 %stack.out.arg, [64
   ; GCN-NEXT:   [[LOAD32:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX32]](p5) :: (invariant load (s32) from %fixed-stack.4, align 16, addrspace 5)
   ; GCN-NEXT:   [[FRAME_INDEX33:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.3
   ; GCN-NEXT:   [[LOAD33:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX33]](p5) :: (invariant load (s32) from %fixed-stack.3, addrspace 5)
-  ; GCN-NEXT:   [[COPY39:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 9
   ; GCN-NEXT:   [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
   ; GCN-NEXT:   [[FRAME_INDEX34:%[0-9]+]]:_(p5) = G_FRAME_INDEX %stack.0.alloca
@@ -1118,14 +1094,14 @@ define fastcc void @sibling_call_byval_and_stack_passed(i32 %stack.out.arg, [64
   ; GCN-NEXT:   [[PTR_ADD1:%[0-9]+]]:_(p5) = G_PTR_ADD [[FRAME_INDEX34]], [[C3]](s32)
   ; GCN-NEXT:   G_STORE [[C]](s32), [[PTR_ADD1]](p5) :: (store (s32) into %ir.alloca + 8, addrspace 5)
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @void_fastcc_byval_and_stack_passed
-  ; GCN-NEXT:   [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY41:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY42:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY43:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY44:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY45:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY46:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY47:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY39:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY40:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY41:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY42:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY43:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY44:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY45:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY46:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[FRAME_INDEX35:%[0-9]+]]:_(p5) = G_FRAME_INDEX %fixed-stack.2
   ; GCN-NEXT:   [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
   ; GCN-NEXT:   G_MEMCPY [[FRAME_INDEX35]](p5), [[FRAME_INDEX34]](p5), [[C4]](s32), 0 :: (dereferenceable store (s96) into %fixed-stack.2, align 16, addrspace 5), (dereferenceable load (s96) from %ir.alloca, align 16, addrspace 5)
@@ -1164,16 +1140,16 @@ define fastcc void @sibling_call_byval_and_stack_passed(i32 %stack.out.arg, [64
   ; GCN-NEXT:   $vgpr28 = COPY [[C1]](s32)
   ; GCN-NEXT:   $vgpr29 = COPY [[C1]](s32)
   ; GCN-NEXT:   $vgpr30 = COPY [[C1]](s32)
-  ; GCN-NEXT:   [[COPY48:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY48]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY40]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY41]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY42]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY43]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY44]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY45]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY46]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY47]](s32)
+  ; GCN-NEXT:   [[COPY47:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY47]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY39]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY40]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY41]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY42]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY43]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY44]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY45]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY46]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @void_fastcc_byval_and_stack_passed, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18, implicit $vgpr19, implicit $vgpr20, implicit $vgpr21, implicit $vgpr22, implicit $vgpr23, implicit $vgpr24, implicit $vgpr25, implicit $vgpr26, implicit $vgpr27, implicit $vgpr28, implicit $vgpr29, implicit $vgpr30, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %alloca = alloca [3 x i32], align 16, addrspace(5)
@@ -1187,7 +1163,7 @@ declare hidden fastcc i64 @i64_fastcc_i64(i64 %arg0)
 define hidden fastcc i64 @sibling_call_i64_fastcc_i64(i64 %a) #1 {
   ; GCN-LABEL: name: sibling_call_i64_fastcc_i64
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1200,29 +1176,28 @@ define hidden fastcc i64 @sibling_call_i64_fastcc_i64(i64 %a) #1 {
   ; GCN-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
-  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i64_fastcc_i64
-  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
   ; GCN-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN-NEXT:   [[COPY19:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY19]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY11]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY12]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY13]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY14]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY15]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY16]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY17]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY18]](s32)
+  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i64_fastcc_i64, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %ret = tail call fastcc i64 @i64_fastcc_i64(i64 %a)
@@ -1234,7 +1209,7 @@ declare hidden fastcc i8 addrspace(1)* @p1i8_fastcc_p1i8(i8 addrspace(1)* %arg0)
 define hidden fastcc i8 addrspace(1)* @sibling_call_p1i8_fastcc_p1i8(i8 addrspace(1)* %a) #1 {
   ; GCN-LABEL: name: sibling_call_p1i8_fastcc_p1i8
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1247,29 +1222,28 @@ define hidden fastcc i8 addrspace(1)* @sibling_call_p1i8_fastcc_p1i8(i8 addrspac
   ; GCN-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr1
   ; GCN-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
-  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @p1i8_fastcc_p1i8
-  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](p1)
   ; GCN-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[UV1]](s32)
-  ; GCN-NEXT:   [[COPY19:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY19]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY11]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY12]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY13]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY14]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY15]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY16]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY17]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY18]](s32)
+  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @p1i8_fastcc_p1i8, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %ret = tail call fastcc i8 addrspace(1)* @p1i8_fastcc_p1i8(i8 addrspace(1)* %a)
@@ -1281,7 +1255,7 @@ declare hidden fastcc i16 @i16_fastcc_i16(i16 %arg0)
 define hidden fastcc i16 @sibling_call_i16_fastcc_i16(i16 %a) #1 {
   ; GCN-LABEL: name: sibling_call_i16_fastcc_i16
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1293,28 +1267,27 @@ define hidden fastcc i16 @sibling_call_i16_fastcc_i16(i16 %a) #1 {
   ; GCN-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
   ; GCN-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
-  ; GCN-NEXT:   [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @i16_fastcc_i16
-  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
   ; GCN-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
+  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @i16_fastcc_i16, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %ret = tail call fastcc i16 @i16_fastcc_i16(i16 %a)
@@ -1326,7 +1299,7 @@ declare hidden fastcc half @f16_fastcc_f16(half %arg0)
 define hidden fastcc half @sibling_call_f16_fastcc_f16(half %a) #1 {
   ; GCN-LABEL: name: sibling_call_f16_fastcc_f16
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1338,28 +1311,27 @@ define hidden fastcc half @sibling_call_f16_fastcc_f16(half %a) #1 {
   ; GCN-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
   ; GCN-NEXT:   [[COPY8:%[0-9]+]]:_(s32) = COPY $vgpr0
   ; GCN-NEXT:   [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY8]](s32)
-  ; GCN-NEXT:   [[COPY9:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @f16_fastcc_f16
-  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC]](s16)
   ; GCN-NEXT:   $vgpr0 = COPY [[ANYEXT]](s32)
-  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
+  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @f16_fastcc_f16, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %ret = tail call fastcc half @f16_fastcc_f16(half %a)
@@ -1371,7 +1343,7 @@ declare hidden fastcc <3 x i16> @v3i16_fastcc_v3i16(<3 x i16> %arg0)
 define hidden fastcc <3 x i16> @sibling_call_v3i16_fastcc_v3i16(<3 x i16> %a) #1 {
   ; GCN-LABEL: name: sibling_call_v3i16_fastcc_v3i16
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1386,32 +1358,31 @@ define hidden fastcc <3 x i16> @sibling_call_v3i16_fastcc_v3i16(<3 x i16> %a) #1
   ; GCN-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY8]](<2 x s16>), [[COPY9]](<2 x s16>)
   ; GCN-NEXT:   [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
   ; GCN-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UV]](s16), [[UV1]](s16), [[UV2]](s16)
-  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @v3i16_fastcc_v3i16
-  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<3 x s16>)
   ; GCN-NEXT:   [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
   ; GCN-NEXT:   [[BUILD_VECTOR1:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UV4]](s16), [[UV5]](s16), [[UV6]](s16), [[DEF]](s16)
   ; GCN-NEXT:   [[UV7:%[0-9]+]]:_(<2 x s16>), [[UV8:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR1]](<4 x s16>)
   ; GCN-NEXT:   $vgpr0 = COPY [[UV7]](<2 x s16>)
   ; GCN-NEXT:   $vgpr1 = COPY [[UV8]](<2 x s16>)
-  ; GCN-NEXT:   [[COPY19:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY19]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY11]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY12]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY13]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY14]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY15]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY16]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY17]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY18]](s32)
+  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @v3i16_fastcc_v3i16, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %ret = tail call fastcc <3 x i16> @v3i16_fastcc_v3i16(<3 x i16> %a)
@@ -1423,7 +1394,7 @@ declare hidden fastcc <4 x i16> @v4i16_fastcc_v4i16(<4 x i16> %arg0)
 define hidden fastcc <4 x i16> @sibling_call_v4i16_fastcc_v4i16(<4 x i16> %a) #1 {
   ; GCN-LABEL: name: sibling_call_v4i16_fastcc_v4i16
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1436,29 +1407,28 @@ define hidden fastcc <4 x i16> @sibling_call_v4i16_fastcc_v4i16(<4 x i16> %a) #1
   ; GCN-NEXT:   [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
   ; GCN-NEXT:   [[COPY9:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
   ; GCN-NEXT:   [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY8]](<2 x s16>), [[COPY9]](<2 x s16>)
-  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @v4i16_fastcc_v4i16
-  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
   ; GCN-NEXT:   $vgpr0 = COPY [[UV]](<2 x s16>)
   ; GCN-NEXT:   $vgpr1 = COPY [[UV1]](<2 x s16>)
-  ; GCN-NEXT:   [[COPY19:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY19]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY11]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY12]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY13]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY14]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY15]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY16]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY17]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY18]](s32)
+  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY18]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY10]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY11]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY12]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY13]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY14]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY15]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY16]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY17]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @v4i16_fastcc_v4i16, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %ret = tail call fastcc <4 x i16> @v4i16_fastcc_v4i16(<4 x i16> %a)
@@ -1470,7 +1440,7 @@ declare hidden fastcc <2 x i64> @v2i64_fastcc_v2i64(<2 x i64> %arg0)
 define hidden fastcc <2 x i64> @sibling_call_v2i64_fastcc_v2i64(<2 x i64> %a) #1 {
   ; GCN-LABEL: name: sibling_call_v2i64_fastcc_v2i64
   ; GCN: bb.1.entry:
-  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; GCN-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -1487,31 +1457,30 @@ define hidden fastcc <2 x i64> @sibling_call_v2i64_fastcc_v2i64(<2 x i64> %a) #1
   ; GCN-NEXT:   [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY8]](s32), [[COPY9]](s32)
   ; GCN-NEXT:   [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[COPY10]](s32), [[COPY11]](s32)
   ; GCN-NEXT:   [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[MV]](s64), [[MV1]](s64)
-  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GCN-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @v2i64_fastcc_v2i64
-  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; GCN-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; GCN-NEXT:   [[COPY20:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; GCN-NEXT:   [[COPY12:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; GCN-NEXT:   [[COPY13:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; GCN-NEXT:   [[COPY14:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; GCN-NEXT:   [[COPY15:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; GCN-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; GCN-NEXT:   [[COPY17:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; GCN-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; GCN-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
   ; GCN-NEXT:   [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s64>)
   ; GCN-NEXT:   $vgpr0 = COPY [[UV]](s32)
   ; GCN-NEXT:   $vgpr1 = COPY [[UV1]](s32)
   ; GCN-NEXT:   $vgpr2 = COPY [[UV2]](s32)
   ; GCN-NEXT:   $vgpr3 = COPY [[UV3]](s32)
-  ; GCN-NEXT:   [[COPY21:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY21]](<4 x s32>)
-  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY13]](p4)
-  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY14]](p4)
-  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY15]](p4)
-  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY16]](s64)
-  ; GCN-NEXT:   $sgpr12 = COPY [[COPY17]](s32)
-  ; GCN-NEXT:   $sgpr13 = COPY [[COPY18]](s32)
-  ; GCN-NEXT:   $sgpr14 = COPY [[COPY19]](s32)
-  ; GCN-NEXT:   $vgpr31 = COPY [[COPY20]](s32)
+  ; GCN-NEXT:   [[COPY20:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; GCN-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY20]](<4 x s32>)
+  ; GCN-NEXT:   $sgpr4_sgpr5 = COPY [[COPY12]](p4)
+  ; GCN-NEXT:   $sgpr6_sgpr7 = COPY [[COPY13]](p4)
+  ; GCN-NEXT:   $sgpr8_sgpr9 = COPY [[COPY14]](p4)
+  ; GCN-NEXT:   $sgpr10_sgpr11 = COPY [[COPY15]](s64)
+  ; GCN-NEXT:   $sgpr12 = COPY [[COPY16]](s32)
+  ; GCN-NEXT:   $sgpr13 = COPY [[COPY17]](s32)
+  ; GCN-NEXT:   $sgpr14 = COPY [[COPY18]](s32)
+  ; GCN-NEXT:   $vgpr31 = COPY [[COPY19]](s32)
   ; GCN-NEXT:   SI_TCRETURN [[GV]](p0), @v2i64_fastcc_v2i64, 0, csr_amdgpu_highregs, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
 entry:
   %ret = tail call fastcc <2 x i64> @v2i64_fastcc_v2i64(<2 x i64> %a)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-tail-call.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-tail-call.ll
index 1147647ec514a..87b53c69f6120 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-tail-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-tail-call.ll
@@ -6,7 +6,7 @@ declare hidden void @external_void_func_void()
 define void @tail_call_void_func_void() {
   ; CHECK-LABEL: name: tail_call_void_func_void
   ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
+  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11
   ; CHECK-NEXT: {{  $}}
   ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
   ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
@@ -16,26 +16,25 @@ define void @tail_call_void_func_void() {
   ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
   ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
   ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; CHECK-NEXT:   [[GV:%[0-9]+]]:sreg_64(p0) = G_GLOBAL_VALUE @external_void_func_void
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
+  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:_(p4) = COPY [[COPY7]]
+  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY6]]
+  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY5]]
+  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(s64) = COPY [[COPY4]]
+  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s32) = COPY [[COPY3]]
+  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY2]]
+  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY1]]
+  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY16]](<4 x s32>)
+  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY8]](p4)
+  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY9]](p4)
+  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY10]](p4)
+  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY11]](s64)
+  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY12]](s32)
+  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY13]](s32)
+  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY14]](s32)
+  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY15]](s32)
   ; CHECK-NEXT:   SI_TCRETURN [[GV]](p0), @external_void_func_void, 0, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31
   tail call void @external_void_func_void()
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx7.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx7.mir
index 075e09d18df61..e09f17095f2fa 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx7.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx7.mir
@@ -36,7 +36,7 @@
 name: and_v2i16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
 
     ; GFX7-LABEL: name: and_v2i16
     ; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -47,15 +47,13 @@ body: |
     ; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
     ; GFX7-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[COPY2]](s32), [[COPY3]](s32)
     ; GFX7-NEXT: [[TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_TRUNC [[BUILD_VECTOR1]](<2 x s32>)
-    ; GFX7-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX7-NEXT: [[AND:%[0-9]+]]:_(<2 x s16>) = G_AND [[TRUNC]], [[TRUNC1]]
     ; GFX7-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[AND]](<2 x s16>)
     ; GFX7-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
     ; GFX7-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
     ; GFX7-NEXT: $vgpr0 = COPY [[BITCAST]](s32)
     ; GFX7-NEXT: $vgpr1 = COPY [[LSHR]](s32)
-    ; GFX7-NEXT: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-    ; GFX7-NEXT: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+    ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
     %3:_(s32) = COPY $vgpr0
     %4:_(s32) = COPY $vgpr1
     %5:_(<2 x s32>) = G_BUILD_VECTOR %3(s32), %4(s32)
@@ -64,15 +62,13 @@ body: |
     %7:_(s32) = COPY $vgpr3
     %8:_(<2 x s32>) = G_BUILD_VECTOR %6(s32), %7(s32)
     %1:_(<2 x s16>) = G_TRUNC %8(<2 x s32>)
-    %2:sgpr_64 = COPY $sgpr30_sgpr31
     %9:_(<2 x s16>) = G_AND %0, %1
     %13:_(s16), %14:_(s16) = G_UNMERGE_VALUES %9(<2 x s16>)
     %11:_(s32) = G_ANYEXT %13(s16)
     %12:_(s32) = G_ANYEXT %14(s16)
     $vgpr0 = COPY %11(s32)
     $vgpr1 = COPY %12(s32)
-    %10:ccr_sgpr_64 = COPY %2
-    S_SETPC_B64_return %10, implicit $vgpr0, implicit $vgpr1
+    SI_RETURN implicit $vgpr0, implicit $vgpr1
 
 ...
 
@@ -80,7 +76,7 @@ body: |
 name: add_v3i16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX7-LABEL: name: add_v3i16
     ; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -89,15 +85,13 @@ body: |
     ; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
     ; GFX7-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX7-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
-    ; GFX7-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX7-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY3]]
     ; GFX7-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY4]]
     ; GFX7-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY5]]
     ; GFX7-NEXT: $vgpr0 = COPY [[ADD]](s32)
     ; GFX7-NEXT: $vgpr1 = COPY [[ADD1]](s32)
     ; GFX7-NEXT: $vgpr2 = COPY [[ADD2]](s32)
-    ; GFX7-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX7-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     %3:_(s32) = COPY $vgpr0
     %4:_(s32) = COPY $vgpr1
     %5:_(s32) = COPY $vgpr2
@@ -108,7 +102,6 @@ body: |
     %9:_(s32) = COPY $vgpr5
     %10:_(<3 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32), %9(s32)
     %1:_(<3 x s16>) = G_TRUNC %10(<3 x s32>)
-    %2:sgpr_64 = COPY $sgpr30_sgpr31
     %11:_(<3 x s16>) = G_ADD %0, %1
     %16:_(s16), %17:_(s16), %18:_(s16) = G_UNMERGE_VALUES %11(<3 x s16>)
     %13:_(s32) = G_ANYEXT %16(s16)
@@ -117,8 +110,7 @@ body: |
     $vgpr0 = COPY %13(s32)
     $vgpr1 = COPY %14(s32)
     $vgpr2 = COPY %15(s32)
-    %12:ccr_sgpr_64 = COPY %2
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
 
 ...
 
@@ -126,7 +118,7 @@ body: |
 name: shl_v3i16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX7-LABEL: name: shl_v3i16
     ; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -135,7 +127,6 @@ body: |
     ; GFX7-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
     ; GFX7-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
     ; GFX7-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
-    ; GFX7-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX7-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
     ; GFX7-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
     ; GFX7-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[AND]](s32)
@@ -146,8 +137,7 @@ body: |
     ; GFX7-NEXT: $vgpr0 = COPY [[SHL]](s32)
     ; GFX7-NEXT: $vgpr1 = COPY [[SHL1]](s32)
     ; GFX7-NEXT: $vgpr2 = COPY [[SHL2]](s32)
-    ; GFX7-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX7-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
     %3:_(s32) = COPY $vgpr0
     %4:_(s32) = COPY $vgpr1
     %5:_(s32) = COPY $vgpr2
@@ -158,7 +148,6 @@ body: |
     %9:_(s32) = COPY $vgpr5
     %10:_(<3 x s32>) = G_BUILD_VECTOR %7(s32), %8(s32), %9(s32)
     %1:_(<3 x s16>) = G_TRUNC %10(<3 x s32>)
-    %2:sgpr_64 = COPY $sgpr30_sgpr31
     %11:_(<3 x s16>) = G_SHL %0, %1(<3 x s16>)
     %16:_(s16), %17:_(s16), %18:_(s16) = G_UNMERGE_VALUES %11(<3 x s16>)
     %13:_(s32) = G_ANYEXT %16(s16)
@@ -167,8 +156,7 @@ body: |
     $vgpr0 = COPY %13(s32)
     $vgpr1 = COPY %14(s32)
     $vgpr2 = COPY %15(s32)
-    %12:ccr_sgpr_64 = COPY %2
-    S_SETPC_B64_return %12, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
+    SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2
 
 ...
 
@@ -176,7 +164,7 @@ body: |
 name: fma_v4f16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11
 
     ; GFX7-LABEL: name: fma_v4f16
     ; GFX7: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
@@ -191,7 +179,6 @@ body: |
     ; GFX7-NEXT: [[COPY9:%[0-9]+]]:_(s32) = COPY $vgpr9
     ; GFX7-NEXT: [[COPY10:%[0-9]+]]:_(s32) = COPY $vgpr10
     ; GFX7-NEXT: [[COPY11:%[0-9]+]]:_(s32) = COPY $vgpr11
-    ; GFX7-NEXT: [[COPY12:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX7-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
     ; GFX7-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
     ; GFX7-NEXT: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
@@ -232,8 +219,7 @@ body: |
     ; GFX7-NEXT: $vgpr1 = COPY [[ANYEXT1]](s32)
     ; GFX7-NEXT: $vgpr2 = COPY [[ANYEXT2]](s32)
     ; GFX7-NEXT: $vgpr3 = COPY [[ANYEXT3]](s32)
-    ; GFX7-NEXT: [[COPY13:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY12]]
-    ; GFX7-NEXT: S_SETPC_B64_return [[COPY13]], implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    ; GFX7-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
     %4:_(s32) = COPY $vgpr0
     %5:_(s32) = COPY $vgpr1
     %6:_(s32) = COPY $vgpr2
@@ -252,7 +238,6 @@ body: |
     %17:_(s32) = COPY $vgpr11
     %18:_(<4 x s32>) = G_BUILD_VECTOR %14(s32), %15(s32), %16(s32), %17(s32)
     %2:_(<4 x s16>) = G_TRUNC %18(<4 x s32>)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %19:_(<4 x s16>) = G_FMA %0, %1, %2
     %25:_(s16), %26:_(s16), %27:_(s16), %28:_(s16) = G_UNMERGE_VALUES %19(<4 x s16>)
     %21:_(s32) = G_ANYEXT %25(s16)
@@ -263,8 +248,7 @@ body: |
     $vgpr1 = COPY %22(s32)
     $vgpr2 = COPY %23(s32)
     $vgpr3 = COPY %24(s32)
-    %20:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %20, implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
+    SI_RETURN implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx8-plus.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx8-plus.mir
index 7e00c75a2a515..51fbb51a29643 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx8-plus.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-vector-args-gfx8-plus.mir
@@ -37,38 +37,32 @@
 name: and_v2i16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1
 
     ; GFX8-LABEL: name: and_v2i16
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX8-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
-    ; GFX8-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX8-NEXT: [[AND:%[0-9]+]]:_(<2 x s16>) = G_AND [[COPY]], [[COPY1]]
     ; GFX8-NEXT: $vgpr0 = COPY [[AND]](<2 x s16>)
-    ; GFX8-NEXT: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-    ; GFX8-NEXT: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+    ; GFX8-NEXT: SI_RETURN implicit $vgpr0
     ; GFX9-LABEL: name: and_v2i16
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
-    ; GFX9-NEXT: [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[AND:%[0-9]+]]:_(<2 x s16>) = G_AND [[COPY]], [[COPY1]]
     ; GFX9-NEXT: $vgpr0 = COPY [[AND]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+    ; GFX9-NEXT: SI_RETURN implicit $vgpr0
     %0:_(<2 x s16>) = COPY $vgpr0
     %1:_(<2 x s16>) = COPY $vgpr1
-    %2:sgpr_64 = COPY $sgpr30_sgpr31
     %3:_(<2 x s16>) = G_AND %0, %1
     $vgpr0 = COPY %3(<2 x s16>)
-    %4:ccr_sgpr_64 = COPY %2
-    S_SETPC_B64_return %4, implicit $vgpr0
+    SI_RETURN implicit $vgpr0
 ...
 
 ---
 name: add_v3i16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
 
     ; GFX8-LABEL: name: add_v3i16
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
@@ -88,7 +82,6 @@ body: |
     ; GFX8-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
     ; GFX8-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY3]](<2 x s16>)
     ; GFX8-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
-    ; GFX8-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX8-NEXT: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[TRUNC3]]
     ; GFX8-NEXT: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[TRUNC1]], [[TRUNC4]]
     ; GFX8-NEXT: [[ADD2:%[0-9]+]]:_(s16) = G_ADD [[TRUNC2]], [[TRUNC5]]
@@ -104,8 +97,7 @@ body: |
     ; GFX8-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
     ; GFX8-NEXT: $vgpr0 = COPY [[BITCAST4]](<2 x s16>)
     ; GFX8-NEXT: $vgpr1 = COPY [[BITCAST5]](<2 x s16>)
-    ; GFX8-NEXT: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-    ; GFX8-NEXT: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+    ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
     ; GFX9-LABEL: name: add_v3i16
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -118,7 +110,6 @@ body: |
     ; GFX9-NEXT: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[COPY2]](<2 x s16>)
     ; GFX9-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
     ; GFX9-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY3]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[BITCAST]](s32), [[LSHR]](s32)
     ; GFX9-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
     ; GFX9-NEXT: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[BITCAST1]](s32), [[DEF]](s32)
@@ -133,8 +124,7 @@ body: |
     ; GFX9-NEXT: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[BITCAST5]](s32), [[DEF]](s32)
     ; GFX9-NEXT: $vgpr0 = COPY [[BUILD_VECTOR_TRUNC4]](<2 x s16>)
     ; GFX9-NEXT: $vgpr1 = COPY [[BUILD_VECTOR_TRUNC5]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
     %3:_(<2 x s16>) = COPY $vgpr0
     %4:_(<2 x s16>) = COPY $vgpr1
     %5:_(<4 x s16>) = G_CONCAT_VECTORS %3(<2 x s16>), %4(<2 x s16>)
@@ -145,7 +135,6 @@ body: |
     %12:_(<4 x s16>) = G_CONCAT_VECTORS %10(<2 x s16>), %11(<2 x s16>)
     %13:_(s16), %14:_(s16), %15:_(s16), %16:_(s16) = G_UNMERGE_VALUES %12(<4 x s16>)
     %1:_(<3 x s16>) = G_BUILD_VECTOR %13(s16), %14(s16), %15(s16)
-    %2:sgpr_64 = COPY $sgpr30_sgpr31
     %17:_(<3 x s16>) = G_ADD %0, %1
     %21:_(s16), %22:_(s16), %23:_(s16) = G_UNMERGE_VALUES %17(<3 x s16>)
     %24:_(s16) = G_IMPLICIT_DEF
@@ -153,15 +142,14 @@ body: |
     %19:_(<2 x s16>), %20:_(<2 x s16>) = G_UNMERGE_VALUES %25(<4 x s16>)
     $vgpr0 = COPY %19(<2 x s16>)
     $vgpr1 = COPY %20(<2 x s16>)
-    %18:ccr_sgpr_64 = COPY %2
-    S_SETPC_B64_return %18, implicit $vgpr0, implicit $vgpr1
+    SI_RETURN implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name: shl_v3i16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
 
     ; GFX8-LABEL: name: shl_v3i16
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
@@ -181,7 +169,6 @@ body: |
     ; GFX8-NEXT: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
     ; GFX8-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY3]](<2 x s16>)
     ; GFX8-NEXT: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
-    ; GFX8-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX8-NEXT: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC3]](s16)
     ; GFX8-NEXT: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC4]](s16)
     ; GFX8-NEXT: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC5]](s16)
@@ -197,8 +184,7 @@ body: |
     ; GFX8-NEXT: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
     ; GFX8-NEXT: $vgpr0 = COPY [[BITCAST4]](<2 x s16>)
     ; GFX8-NEXT: $vgpr1 = COPY [[BITCAST5]](<2 x s16>)
-    ; GFX8-NEXT: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-    ; GFX8-NEXT: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+    ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
     ; GFX9-LABEL: name: shl_v3i16
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -213,7 +199,6 @@ body: |
     ; GFX9-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
     ; GFX9-NEXT: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[COPY3]](<2 x s16>)
     ; GFX9-NEXT: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
-    ; GFX9-NEXT: [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[BITCAST]](s32), [[LSHR]](s32)
     ; GFX9-NEXT: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[BITCAST2]](s32), [[LSHR1]](s32)
     ; GFX9-NEXT: [[SHL:%[0-9]+]]:_(<2 x s16>) = G_SHL [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
@@ -226,8 +211,7 @@ body: |
     ; GFX9-NEXT: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[ANYEXT]](s32), [[DEF]](s32)
     ; GFX9-NEXT: $vgpr0 = COPY [[BUILD_VECTOR_TRUNC2]](<2 x s16>)
     ; GFX9-NEXT: $vgpr1 = COPY [[BUILD_VECTOR_TRUNC3]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY4]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY5]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
     %3:_(<2 x s16>) = COPY $vgpr0
     %4:_(<2 x s16>) = COPY $vgpr1
     %5:_(<4 x s16>) = G_CONCAT_VECTORS %3(<2 x s16>), %4(<2 x s16>)
@@ -238,7 +222,6 @@ body: |
     %12:_(<4 x s16>) = G_CONCAT_VECTORS %10(<2 x s16>), %11(<2 x s16>)
     %13:_(s16), %14:_(s16), %15:_(s16), %16:_(s16) = G_UNMERGE_VALUES %12(<4 x s16>)
     %1:_(<3 x s16>) = G_BUILD_VECTOR %13(s16), %14(s16), %15(s16)
-    %2:sgpr_64 = COPY $sgpr30_sgpr31
     %17:_(<3 x s16>) = G_SHL %0, %1(<3 x s16>)
     %21:_(s16), %22:_(s16), %23:_(s16) = G_UNMERGE_VALUES %17(<3 x s16>)
     %24:_(s16) = G_IMPLICIT_DEF
@@ -246,15 +229,14 @@ body: |
     %19:_(<2 x s16>), %20:_(<2 x s16>) = G_UNMERGE_VALUES %25(<4 x s16>)
     $vgpr0 = COPY %19(<2 x s16>)
     $vgpr1 = COPY %20(<2 x s16>)
-    %18:ccr_sgpr_64 = COPY %2
-    S_SETPC_B64_return %18, implicit $vgpr0, implicit $vgpr1
+    SI_RETURN implicit $vgpr0, implicit $vgpr1
 ...
 
 ---
 name: fma_v4f16
 body: |
   bb.1:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5
 
     ; GFX8-LABEL: name: fma_v4f16
     ; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
@@ -263,7 +245,6 @@ body: |
     ; GFX8-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr3
     ; GFX8-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX8-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
-    ; GFX8-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX8-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
     ; GFX8-NEXT: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
     ; GFX8-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
@@ -305,8 +286,7 @@ body: |
     ; GFX8-NEXT: [[BITCAST7:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
     ; GFX8-NEXT: $vgpr0 = COPY [[BITCAST6]](<2 x s16>)
     ; GFX8-NEXT: $vgpr1 = COPY [[BITCAST7]](<2 x s16>)
-    ; GFX8-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX8-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX8-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
     ; GFX9-LABEL: name: fma_v4f16
     ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
     ; GFX9-NEXT: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
@@ -314,13 +294,11 @@ body: |
     ; GFX9-NEXT: [[COPY3:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr3
     ; GFX9-NEXT: [[COPY4:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr4
     ; GFX9-NEXT: [[COPY5:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr5
-    ; GFX9-NEXT: [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; GFX9-NEXT: [[FMA:%[0-9]+]]:_(<2 x s16>) = G_FMA [[COPY]], [[COPY2]], [[COPY4]]
     ; GFX9-NEXT: [[FMA1:%[0-9]+]]:_(<2 x s16>) = G_FMA [[COPY1]], [[COPY3]], [[COPY5]]
     ; GFX9-NEXT: $vgpr0 = COPY [[FMA]](<2 x s16>)
     ; GFX9-NEXT: $vgpr1 = COPY [[FMA1]](<2 x s16>)
-    ; GFX9-NEXT: [[COPY7:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY6]]
-    ; GFX9-NEXT: S_SETPC_B64_return [[COPY7]], implicit $vgpr0, implicit $vgpr1
+    ; GFX9-NEXT: SI_RETURN implicit $vgpr0, implicit $vgpr1
     %4:_(<2 x s16>) = COPY $vgpr0
     %5:_(<2 x s16>) = COPY $vgpr1
     %0:_(<4 x s16>) = G_CONCAT_VECTORS %4(<2 x s16>), %5(<2 x s16>)
@@ -330,13 +308,11 @@ body: |
     %8:_(<2 x s16>) = COPY $vgpr4
     %9:_(<2 x s16>) = COPY $vgpr5
     %2:_(<4 x s16>) = G_CONCAT_VECTORS %8(<2 x s16>), %9(<2 x s16>)
-    %3:sgpr_64 = COPY $sgpr30_sgpr31
     %10:_(<4 x s16>) = G_FMA %0, %1, %2
     %12:_(<2 x s16>), %13:_(<2 x s16>) = G_UNMERGE_VALUES %10(<4 x s16>)
     $vgpr0 = COPY %12(<2 x s16>)
     $vgpr1 = COPY %13(<2 x s16>)
-    %11:ccr_sgpr_64 = COPY %3
-    S_SETPC_B64_return %11, implicit $vgpr0, implicit $vgpr1
+    SI_RETURN implicit $vgpr0, implicit $vgpr1
 ...
 
 ---

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ds.fmax.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ds.fmax.ll
index 87ef4479d4bf5..815f9b1a07fe9 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ds.fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.amdgcn.ds.fmax.ll
@@ -187,25 +187,21 @@ define float @ds_fmax_f32_vv(float addrspace(3)* %ptr, float %val) {
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   ; GFX8-MIR-LABEL: name: ds_fmax_f32_vv
   ; GFX8-MIR: bb.1 (%ir-block.0):
-  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX8-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX8-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX8-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX8-MIR:   $m0 = S_MOV_B32 -1
   ; GFX8-MIR:   [[DS_MAX_RTN_F32_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32 [[COPY]], [[COPY1]], 0, 0, implicit $m0, implicit $exec :: (load store (s32) on %ir.ptr, addrspace 3)
   ; GFX8-MIR:   $vgpr0 = COPY [[DS_MAX_RTN_F32_]]
-  ; GFX8-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX8-MIR:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GFX8-MIR:   SI_RETURN implicit $vgpr0
   ; GFX9-MIR-LABEL: name: ds_fmax_f32_vv
   ; GFX9-MIR: bb.1 (%ir-block.0):
-  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX9-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX9-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX9-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX9-MIR:   [[DS_MAX_RTN_F32_gfx9_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32_gfx9 [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load store (s32) on %ir.ptr, addrspace 3)
   ; GFX9-MIR:   $vgpr0 = COPY [[DS_MAX_RTN_F32_gfx9_]]
-  ; GFX9-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX9-MIR:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GFX9-MIR:   SI_RETURN implicit $vgpr0
   %ret = call float @llvm.amdgcn.ds.fmax(float addrspace(3)* %ptr, float %val, i32 0, i32 0, i1 false)
   ret float %ret
 }
@@ -227,25 +223,21 @@ define float @ds_fmax_f32_vv_offset(float addrspace(3)* %ptr, float %val) {
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   ; GFX8-MIR-LABEL: name: ds_fmax_f32_vv_offset
   ; GFX8-MIR: bb.1 (%ir-block.0):
-  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX8-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX8-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX8-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX8-MIR:   $m0 = S_MOV_B32 -1
   ; GFX8-MIR:   [[DS_MAX_RTN_F32_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32 [[COPY]], [[COPY1]], 512, 0, implicit $m0, implicit $exec :: (load store (s32) on %ir.gep, addrspace 3)
   ; GFX8-MIR:   $vgpr0 = COPY [[DS_MAX_RTN_F32_]]
-  ; GFX8-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX8-MIR:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GFX8-MIR:   SI_RETURN implicit $vgpr0
   ; GFX9-MIR-LABEL: name: ds_fmax_f32_vv_offset
   ; GFX9-MIR: bb.1 (%ir-block.0):
-  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX9-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX9-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX9-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX9-MIR:   [[DS_MAX_RTN_F32_gfx9_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32_gfx9 [[COPY]], [[COPY1]], 512, 0, implicit $exec :: (load store (s32) on %ir.gep, addrspace 3)
   ; GFX9-MIR:   $vgpr0 = COPY [[DS_MAX_RTN_F32_gfx9_]]
-  ; GFX9-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX9-MIR:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GFX9-MIR:   SI_RETURN implicit $vgpr0
   %gep = getelementptr float, float addrspace(3)* %ptr, i32 128
   %ret = call float @llvm.amdgcn.ds.fmax(float addrspace(3)* %gep, float %val, i32 0, i32 0, i1 false)
   ret float %ret
@@ -268,23 +260,19 @@ define void @ds_fmax_f32_vv_nortn(float addrspace(3)* %ptr, float %val) {
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   ; GFX8-MIR-LABEL: name: ds_fmax_f32_vv_nortn
   ; GFX8-MIR: bb.1 (%ir-block.0):
-  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX8-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX8-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX8-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX8-MIR:   $m0 = S_MOV_B32 -1
   ; GFX8-MIR:   [[DS_MAX_RTN_F32_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32 [[COPY]], [[COPY1]], 0, 0, implicit $m0, implicit $exec :: (load store (s32) on %ir.ptr, addrspace 3)
-  ; GFX8-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX8-MIR:   S_SETPC_B64_return [[COPY3]]
+  ; GFX8-MIR:   SI_RETURN
   ; GFX9-MIR-LABEL: name: ds_fmax_f32_vv_nortn
   ; GFX9-MIR: bb.1 (%ir-block.0):
-  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX9-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX9-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX9-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX9-MIR:   [[DS_MAX_RTN_F32_gfx9_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32_gfx9 [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (load store (s32) on %ir.ptr, addrspace 3)
-  ; GFX9-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX9-MIR:   S_SETPC_B64_return [[COPY3]]
+  ; GFX9-MIR:   SI_RETURN
   %ret = call float @llvm.amdgcn.ds.fmax(float addrspace(3)* %ptr, float %val, i32 0, i32 0, i1 false)
   ret void
 }
@@ -306,23 +294,19 @@ define void @ds_fmax_f32_vv_offset_nortn(float addrspace(3)* %ptr, float %val) {
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   ; GFX8-MIR-LABEL: name: ds_fmax_f32_vv_offset_nortn
   ; GFX8-MIR: bb.1 (%ir-block.0):
-  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX8-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX8-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX8-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX8-MIR:   $m0 = S_MOV_B32 -1
   ; GFX8-MIR:   [[DS_MAX_RTN_F32_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32 [[COPY]], [[COPY1]], 512, 0, implicit $m0, implicit $exec :: (load store (s32) on %ir.gep, addrspace 3)
-  ; GFX8-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX8-MIR:   S_SETPC_B64_return [[COPY3]]
+  ; GFX8-MIR:   SI_RETURN
   ; GFX9-MIR-LABEL: name: ds_fmax_f32_vv_offset_nortn
   ; GFX9-MIR: bb.1 (%ir-block.0):
-  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX9-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX9-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX9-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX9-MIR:   [[DS_MAX_RTN_F32_gfx9_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32_gfx9 [[COPY]], [[COPY1]], 512, 0, implicit $exec :: (load store (s32) on %ir.gep, addrspace 3)
-  ; GFX9-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX9-MIR:   S_SETPC_B64_return [[COPY3]]
+  ; GFX9-MIR:   SI_RETURN
   %gep = getelementptr float, float addrspace(3)* %ptr, i32 128
   %ret = call float @llvm.amdgcn.ds.fmax(float addrspace(3)* %gep, float %val, i32 0, i32 0, i1 false)
   ret void
@@ -345,25 +329,21 @@ define float @ds_fmax_f32_vv_volatile(float addrspace(3)* %ptr, float %val) {
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   ; GFX8-MIR-LABEL: name: ds_fmax_f32_vv_volatile
   ; GFX8-MIR: bb.1 (%ir-block.0):
-  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX8-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX8-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX8-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX8-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX8-MIR:   $m0 = S_MOV_B32 -1
   ; GFX8-MIR:   [[DS_MAX_RTN_F32_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32 [[COPY]], [[COPY1]], 0, 0, implicit $m0, implicit $exec :: (volatile load store (s32) on %ir.ptr, addrspace 3)
   ; GFX8-MIR:   $vgpr0 = COPY [[DS_MAX_RTN_F32_]]
-  ; GFX8-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX8-MIR:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GFX8-MIR:   SI_RETURN implicit $vgpr0
   ; GFX9-MIR-LABEL: name: ds_fmax_f32_vv_volatile
   ; GFX9-MIR: bb.1 (%ir-block.0):
-  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GFX9-MIR:   liveins: $vgpr0, $vgpr1
   ; GFX9-MIR:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
   ; GFX9-MIR:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GFX9-MIR:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
   ; GFX9-MIR:   [[DS_MAX_RTN_F32_gfx9_:%[0-9]+]]:vgpr_32 = DS_MAX_RTN_F32_gfx9 [[COPY]], [[COPY1]], 0, 0, implicit $exec :: (volatile load store (s32) on %ir.ptr, addrspace 3)
   ; GFX9-MIR:   $vgpr0 = COPY [[DS_MAX_RTN_F32_gfx9_]]
-  ; GFX9-MIR:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GFX9-MIR:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GFX9-MIR:   SI_RETURN implicit $vgpr0
   %ret = call float @llvm.amdgcn.ds.fmax(float addrspace(3)* %ptr, float %val, i32 0, i32 0, i1 true)
   ret float %ret
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-unaligned.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-unaligned.ll
index d325ed2f9ac08..8a60d327e653e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/load-unaligned.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/load-unaligned.ll
@@ -269,16 +269,16 @@ define void @store_lds_v4i32_align1(<4 x i32> addrspace(3)* %out, <4 x i32> %x)
 ; GFX10-NEXT:    v_lshrrev_b16 v6, 8, v1
 ; GFX10-NEXT:    ds_write_b8 v0, v1
 ; GFX10-NEXT:    ds_write_b8_d16_hi v0, v1 offset:2
+; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
 ; GFX10-NEXT:    v_lshrrev_b16 v7, 8, v2
-; GFX10-NEXT:    v_lshrrev_b16 v1, 8, v5
-; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 16, v2
+; GFX10-NEXT:    v_lshrrev_b16 v5, 8, v5
 ; GFX10-NEXT:    ds_write_b8 v0, v2 offset:4
 ; GFX10-NEXT:    ds_write_b8 v0, v6 offset:1
-; GFX10-NEXT:    ds_write_b8 v0, v1 offset:3
+; GFX10-NEXT:    ds_write_b8 v0, v5 offset:3
 ; GFX10-NEXT:    ds_write_b8 v0, v7 offset:5
 ; GFX10-NEXT:    ds_write_b8_d16_hi v0, v2 offset:6
+; GFX10-NEXT:    v_lshrrev_b16 v1, 8, v1
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v2, 16, v3
-; GFX10-NEXT:    v_lshrrev_b16 v1, 8, v5
 ; GFX10-NEXT:    v_lshrrev_b16 v5, 8, v3
 ; GFX10-NEXT:    ds_write_b8 v0, v1 offset:7
 ; GFX10-NEXT:    ds_write_b8 v0, v3 offset:8
@@ -344,17 +344,17 @@ define void @store_lds_v3i32_align1(<3 x i32> addrspace(3)* %out, <3 x i32> %x)
 ; GFX10-NEXT:    ds_write_b8 v0, v1
 ; GFX10-NEXT:    ds_write_b8_d16_hi v0, v1 offset:2
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v1, 16, v2
-; GFX10-NEXT:    v_lshrrev_b32_e32 v7, 16, v3
-; GFX10-NEXT:    v_lshrrev_b16 v4, 8, v4
 ; GFX10-NEXT:    v_lshrrev_b16 v6, 8, v2
+; GFX10-NEXT:    v_lshrrev_b16 v4, 8, v4
+; GFX10-NEXT:    v_lshrrev_b32_e32 v7, 16, v3
 ; GFX10-NEXT:    ds_write_b8 v0, v2 offset:4
 ; GFX10-NEXT:    v_lshrrev_b16 v1, 8, v1
 ; GFX10-NEXT:    ds_write_b8 v0, v5 offset:1
 ; GFX10-NEXT:    ds_write_b8 v0, v4 offset:3
+; GFX10-NEXT:    ds_write_b8 v0, v6 offset:5
 ; GFX10-NEXT:    v_lshrrev_b16 v4, 8, v3
 ; GFX10-NEXT:    ds_write_b8_d16_hi v0, v2 offset:6
 ; GFX10-NEXT:    v_lshrrev_b16 v2, 8, v7
-; GFX10-NEXT:    ds_write_b8 v0, v6 offset:5
 ; GFX10-NEXT:    ds_write_b8 v0, v1 offset:7
 ; GFX10-NEXT:    ds_write_b8 v0, v3 offset:8
 ; GFX10-NEXT:    ds_write_b8 v0, v4 offset:9

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
index 615ee8472d476..f1abd389a2391 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
@@ -243,15 +243,15 @@ define void @sink_null_insert_pt(i32 addrspace(4)* %arg0) {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], 0
-; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
-; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
-; GFX9-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GFX9-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GFX9-NEXT:    s_mov_b64 exec, s[6:7]
+; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    s_setpc_b64 s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %load0 = load volatile i32, i32 addrspace(1)* null, align 4
   br label %bb1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
index 2d440467622e8..c2f624f3a2ee2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/lshr.ll
@@ -1644,8 +1644,8 @@ define i65 @v_lshr_i65(i65 %value, i65 %amount) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    v_and_b32_e32 v4, 1, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v5, 0
+; GFX10-NEXT:    v_and_b32_e32 v4, 1, v2
 ; GFX10-NEXT:    v_sub_nc_u32_e32 v2, 64, v3
 ; GFX10-NEXT:    v_subrev_nc_u32_e32 v10, 64, v3
 ; GFX10-NEXT:    v_lshrrev_b64 v[6:7], v3, v[0:1]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-smed3.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-smed3.mir
index da576626e9954..ab05e6ef7ead5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-smed3.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-smed3.mir
@@ -9,23 +9,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_min_max_ValK0_K1_i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 -12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMAX %0, %7
@@ -33,8 +30,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMIN %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -44,23 +40,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: min_max_ValK0_K1_i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 -12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMAX %7, %0
@@ -68,8 +61,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMIN %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -79,23 +71,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_min_K1max_ValK0__i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 -12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMAX %0, %7
@@ -103,8 +92,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMIN %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -114,23 +102,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_min_K1max_K0Val__i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 -12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMAX %7, %0
@@ -138,8 +123,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMIN %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -149,23 +133,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_min_ValK1_K0_i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMIN %0, %7
@@ -173,8 +154,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMAX %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -184,23 +164,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_min_K1Val_K0_i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMIN %7, %0
@@ -208,8 +185,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMAX %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -219,23 +195,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_K0min_ValK1__i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMIN %0, %7
@@ -243,8 +216,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMAX %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -254,23 +226,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_K0min_K1Val__i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMIN %7, %0
@@ -278,8 +247,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMAX %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -289,13 +257,12 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_K0min_K1Val__v2i16
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
@@ -305,10 +272,8 @@ body: |
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
     ; CHECK-NEXT: [[SMAX:%[0-9]+]]:vgpr(<2 x s16>) = G_SMAX [[COPY3]], [[SMIN]]
     ; CHECK-NEXT: $vgpr0 = COPY [[SMAX]](<2 x s16>)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(<2 x s16>) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %9:sgpr(s32) = G_CONSTANT i32 17
     %2:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC %9(s32), %9(s32)
     %10:sgpr(s32) = G_CONSTANT i32 -12
@@ -318,8 +283,7 @@ body: |
     %12:vgpr(<2 x s16>) = COPY %5(<2 x s16>)
     %7:vgpr(<2 x s16>) = G_SMAX %12, %4
     $vgpr0 = COPY %7(<2 x s16>)
-    %8:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %8, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -361,23 +325,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_non_inline_constant_i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 -12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 65
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_SMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_SMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_SMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 -12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_SMAX %0, %7
@@ -385,6 +346,5 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_SMIN %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-umed3.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-umed3.mir
index 8c5d796a60e06..4ecc912fc7e44 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-umed3.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankcombiner-umed3.mir
@@ -9,23 +9,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_min_max_ValK0_K1_u32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMAX %0, %7
@@ -33,8 +30,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMIN %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -44,23 +40,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: min_max_ValK0_K1_i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMAX %7, %0
@@ -68,8 +61,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMIN %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -79,23 +71,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_min_K1max_ValK0__u32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMAX %0, %7
@@ -103,8 +92,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMIN %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -114,23 +102,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_min_K1max_K0Val__u32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMAX %7, %0
@@ -138,8 +123,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMIN %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -149,23 +133,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_min_ValK1_K0_u32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMIN %0, %7
@@ -173,8 +154,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMAX %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -184,23 +164,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_min_K1Val_K0_u32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMIN %7, %0
@@ -208,8 +185,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMAX %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -219,23 +195,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_K0min_ValK1__u32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMIN %0, %7
@@ -243,8 +216,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMAX %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -254,23 +226,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_K0min_K1Val__u32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY3]], [[COPY2]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 17
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMIN %7, %0
@@ -278,8 +247,7 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMAX %8, %3
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -289,13 +257,12 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_max_K0min_K1Val__v2u16
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(<2 x s16>) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 17
     ; CHECK-NEXT: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
@@ -305,10 +272,8 @@ body: |
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(<2 x s16>) = COPY [[BUILD_VECTOR_TRUNC1]](<2 x s16>)
     ; CHECK-NEXT: [[UMAX:%[0-9]+]]:vgpr(<2 x s16>) = G_UMAX [[COPY3]], [[UMIN]]
     ; CHECK-NEXT: $vgpr0 = COPY [[UMAX]](<2 x s16>)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(<2 x s16>) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %9:sgpr(s32) = G_CONSTANT i32 17
     %2:sgpr(<2 x s16>) = G_BUILD_VECTOR_TRUNC %9(s32), %9(s32)
     %10:sgpr(s32) = G_CONSTANT i32 12
@@ -318,8 +283,7 @@ body: |
     %12:vgpr(<2 x s16>) = COPY %5(<2 x s16>)
     %7:vgpr(<2 x s16>) = G_UMAX %12, %4
     $vgpr0 = COPY %7(<2 x s16>)
-    %8:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %8, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...
 
 ---
@@ -362,23 +326,20 @@ regBankSelected: true
 tracksRegLiveness: true
 body: |
   bb.1:
-    liveins: $vgpr0, $sgpr30_sgpr31
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_non_inline_constant_i32
-    ; CHECK: liveins: $vgpr0, $sgpr30_sgpr31
+    ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0
-    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
     ; CHECK-NEXT: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 12
     ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vgpr(s32) = COPY [[C]](s32)
     ; CHECK-NEXT: [[C1:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 65
     ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[C1]](s32)
     ; CHECK-NEXT: [[AMDGPU_UMED3_:%[0-9]+]]:vgpr(s32) = G_AMDGPU_UMED3 [[COPY]], [[COPY2]], [[COPY3]]
     ; CHECK-NEXT: $vgpr0 = COPY [[AMDGPU_UMED3_]](s32)
-    ; CHECK-NEXT: [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY1]]
-    ; CHECK-NEXT: S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+    ; CHECK-NEXT: S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
     %0:vgpr(s32) = COPY $vgpr0
-    %1:sgpr_64 = COPY $sgpr30_sgpr31
     %2:sgpr(s32) = G_CONSTANT i32 12
     %7:vgpr(s32) = COPY %2(s32)
     %3:vgpr(s32) = G_UMAX %0, %7
@@ -386,6 +347,5 @@ body: |
     %8:vgpr(s32) = COPY %4(s32)
     %5:vgpr(s32) = G_UMIN %3, %8
     $vgpr0 = COPY %5(s32)
-    %6:ccr_sgpr_64 = COPY %1
-    S_SETPC_B64_return %6, implicit $vgpr0
+    S_SETPC_B64_return undef $sgpr30_sgpr31, implicit $vgpr0
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
index d9385251a4dcb..6416026570058 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/roundeven.ll
@@ -401,13 +401,13 @@ define <4 x half> @v_roundeven_v4f16(<4 x half> %x) {
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    v_rndne_f16_e32 v2, v0
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX10-NEXT:    v_rndne_f16_e32 v3, v0
 ; GFX10-NEXT:    v_rndne_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT:    v_rndne_f16_e32 v3, v1
-; GFX10-NEXT:    v_mov_b32_e32 v4, 0xffff
+; GFX10-NEXT:    v_rndne_f16_e32 v4, v1
 ; GFX10-NEXT:    v_rndne_f16_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX10-NEXT:    v_and_or_b32 v0, v2, v4, v0
-; GFX10-NEXT:    v_and_or_b32 v1, v3, v4, v1
+; GFX10-NEXT:    v_and_or_b32 v0, v3, v2, v0
+; GFX10-NEXT:    v_and_or_b32 v1, v4, v2, v1
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
   %roundeven = call <4 x half> @llvm.roundeven.v4f16(<4 x half> %x)
   ret <4 x half> %roundeven

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
index 77c182f1a8ac7..642b20879ea04 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
@@ -642,25 +642,25 @@ define i32 @v_saddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v4, 24, v0
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 24, v1
 ; GFX10-NEXT:    s_mov_b32 s4, 8
-; GFX10-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_mov_b32_e32 v7, 0xffff
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v3, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v7, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT:    v_and_or_b32 v0, v0, v2, v3
+; GFX10-NEXT:    v_and_or_b32 v1, v1, v2, v7
 ; GFX10-NEXT:    s_movk_i32 s4, 0xff
-; GFX10-NEXT:    v_and_or_b32 v0, v0, v7, v2
-; GFX10-NEXT:    v_and_or_b32 v1, v1, v7, v6
-; GFX10-NEXT:    v_and_or_b32 v2, v3, v7, v4
-; GFX10-NEXT:    v_and_or_b32 v3, v8, v7, v5
-; GFX10-NEXT:    v_mov_b32_e32 v4, 24
+; GFX10-NEXT:    v_and_or_b32 v3, v6, v2, v4
+; GFX10-NEXT:    v_and_or_b32 v2, v8, v2, v5
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1]
-; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_mov_b32_e32 v4, 24
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_add_i16 v0, v0, v1 clamp
-; GFX10-NEXT:    v_pk_add_i16 v1, v2, v3 clamp
+; GFX10-NEXT:    v_pk_add_i16 v1, v3, v2 clamp
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 8
 ; GFX10-NEXT:    v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
index dbd4eab951828..f3509f3e80694 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
@@ -642,25 +642,25 @@ define i32 @v_ssubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v4, 24, v0
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 24, v1
 ; GFX10-NEXT:    s_mov_b32 s4, 8
-; GFX10-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_mov_b32_e32 v7, 0xffff
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v3, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v7, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT:    v_and_or_b32 v0, v0, v2, v3
+; GFX10-NEXT:    v_and_or_b32 v1, v1, v2, v7
 ; GFX10-NEXT:    s_movk_i32 s4, 0xff
-; GFX10-NEXT:    v_and_or_b32 v0, v0, v7, v2
-; GFX10-NEXT:    v_and_or_b32 v1, v1, v7, v6
-; GFX10-NEXT:    v_and_or_b32 v2, v3, v7, v4
-; GFX10-NEXT:    v_and_or_b32 v3, v8, v7, v5
-; GFX10-NEXT:    v_mov_b32_e32 v4, 24
+; GFX10-NEXT:    v_and_or_b32 v3, v6, v2, v4
+; GFX10-NEXT:    v_and_or_b32 v2, v8, v2, v5
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1]
-; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_mov_b32_e32 v4, 24
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_sub_i16 v0, v0, v1 clamp
-; GFX10-NEXT:    v_pk_sub_i16 v1, v2, v3 clamp
+; GFX10-NEXT:    v_pk_sub_i16 v1, v3, v2 clamp
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 8
 ; GFX10-NEXT:    v_pk_ashrrev_i16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_ashrrev_i16 v1, 8, v1 op_sel_hi:[0,1]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
index c248c4236e067..b07bdeeabd7ce 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
@@ -469,25 +469,25 @@ define i32 @v_uaddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v4, 24, v0
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 24, v1
 ; GFX10-NEXT:    s_mov_b32 s4, 8
-; GFX10-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_mov_b32_e32 v7, 0xffff
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v3, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v7, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT:    v_and_or_b32 v0, v0, v2, v3
+; GFX10-NEXT:    v_and_or_b32 v1, v1, v2, v7
 ; GFX10-NEXT:    s_movk_i32 s4, 0xff
-; GFX10-NEXT:    v_and_or_b32 v0, v0, v7, v2
-; GFX10-NEXT:    v_and_or_b32 v1, v1, v7, v6
-; GFX10-NEXT:    v_and_or_b32 v2, v3, v7, v4
-; GFX10-NEXT:    v_and_or_b32 v3, v8, v7, v5
-; GFX10-NEXT:    v_mov_b32_e32 v4, 24
+; GFX10-NEXT:    v_and_or_b32 v3, v6, v2, v4
+; GFX10-NEXT:    v_and_or_b32 v2, v8, v2, v5
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1]
-; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_mov_b32_e32 v4, 24
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_add_u16 v0, v0, v1 clamp
-; GFX10-NEXT:    v_pk_add_u16 v1, v2, v3 clamp
+; GFX10-NEXT:    v_pk_add_u16 v1, v3, v2 clamp
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 8
 ; GFX10-NEXT:    v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
index 9676db3eba17e..805afaad6d3e7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
@@ -457,25 +457,25 @@ define i32 @v_usubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v4, 24, v0
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v5, 24, v1
 ; GFX10-NEXT:    s_mov_b32 s4, 8
-; GFX10-NEXT:    v_lshrrev_b32_e32 v3, 16, v0
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_lshrrev_b32_sdwa v6, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX10-NEXT:    v_mov_b32_e32 v7, 0xffff
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0xffff
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v3, s4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
+; GFX10-NEXT:    v_lshrrev_b32_sdwa v7, s4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 16, v1
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 16, v4
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v5, 16, v5
+; GFX10-NEXT:    v_and_or_b32 v0, v0, v2, v3
+; GFX10-NEXT:    v_and_or_b32 v1, v1, v2, v7
 ; GFX10-NEXT:    s_movk_i32 s4, 0xff
-; GFX10-NEXT:    v_and_or_b32 v0, v0, v7, v2
-; GFX10-NEXT:    v_and_or_b32 v1, v1, v7, v6
-; GFX10-NEXT:    v_and_or_b32 v2, v3, v7, v4
-; GFX10-NEXT:    v_and_or_b32 v3, v8, v7, v5
-; GFX10-NEXT:    v_mov_b32_e32 v4, 24
+; GFX10-NEXT:    v_and_or_b32 v3, v6, v2, v4
+; GFX10-NEXT:    v_and_or_b32 v2, v8, v2, v5
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v1, 8, v1 op_sel_hi:[0,1]
-; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_mov_b32_e32 v4, 24
 ; GFX10-NEXT:    v_pk_lshlrev_b16 v3, 8, v3 op_sel_hi:[0,1]
+; GFX10-NEXT:    v_pk_lshlrev_b16 v2, 8, v2 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_sub_u16 v0, v0, v1 clamp
-; GFX10-NEXT:    v_pk_sub_u16 v1, v2, v3 clamp
+; GFX10-NEXT:    v_pk_sub_u16 v1, v3, v2 clamp
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 8
 ; GFX10-NEXT:    v_pk_lshrrev_b16 v0, 8, v0 op_sel_hi:[0,1]
 ; GFX10-NEXT:    v_pk_lshrrev_b16 v1, 8, v1 op_sel_hi:[0,1]

diff --git a/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll b/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
index 11084620176dd..dcde1b181b0cf 100644
--- a/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
+++ b/llvm/test/CodeGen/AMDGPU/abi-attribute-hints-undefined-behavior.ll
@@ -29,15 +29,15 @@ define void @parent_func_missing_inputs() #0 {
 ; FIXEDABI-NEXT:    s_add_u32 s16, s16, requires_all_inputs@rel32@lo+4
 ; FIXEDABI-NEXT:    s_addc_u32 s17, s17, requires_all_inputs@rel32@hi+12
 ; FIXEDABI-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; FIXEDABI-NEXT:    v_readlane_b32 s4, v40, 0
-; FIXEDABI-NEXT:    v_readlane_b32 s5, v40, 1
+; FIXEDABI-NEXT:    v_readlane_b32 s31, v40, 1
+; FIXEDABI-NEXT:    v_readlane_b32 s30, v40, 0
 ; FIXEDABI-NEXT:    s_addk_i32 s32, 0xfc00
 ; FIXEDABI-NEXT:    v_readlane_b32 s33, v40, 2
-; FIXEDABI-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; FIXEDABI-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; FIXEDABI-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; FIXEDABI-NEXT:    s_mov_b64 exec, s[6:7]
+; FIXEDABI-NEXT:    s_mov_b64 exec, s[4:5]
 ; FIXEDABI-NEXT:    s_waitcnt vmcnt(0)
-; FIXEDABI-NEXT:    s_setpc_b64 s[4:5]
+; FIXEDABI-NEXT:    s_setpc_b64 s[30:31]
   call void @requires_all_inputs()
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-known-non-null.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-known-non-null.ll
index f3971427e3c5a..2edaa889c1afc 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast-known-non-null.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-known-non-null.ll
@@ -11,8 +11,8 @@ define void @flat_user(i8* %ptr) {
 
 ; CHECK-LABEL: {{^}}cast_alloca:
 ; CHECK: s_getreg_b32 [[GETREG:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 0, 16)
-; CHECK-NEXT: s_lshl_b32 [[APERTURE:s[0-9]+]], [[GETREG]], 16
-; CHECK-NEXT: v_lshrrev_b32_e64 v0, 6, s33
+; CHECK: s_lshl_b32 [[APERTURE:s[0-9]+]], [[GETREG]], 16
+; CHECK: v_lshrrev_b32_e64 v0, 6, s33
 ; CHECK-NEXT: v_mov_b32_e32 v1, [[APERTURE]]
 ; CHECK-NOT: v0
 ; CHECK-NOT: v1
@@ -27,9 +27,9 @@ define void @cast_alloca() {
 
 ; CHECK-LABEL: {{^}}cast_lds_gv:
 ; CHECK: s_getreg_b32 [[GETREG:s[0-9]+]], hwreg(HW_REG_SH_MEM_BASES, 16, 16)
-; CHECK-NEXT: s_lshl_b32 [[APERTURE:s[0-9]+]], [[GETREG]], 16
-; CHECK-NEXT: v_mov_b32_e32 v0, 0
-; CHECK-NEXT: v_mov_b32_e32 v1, [[APERTURE]]
+; CHECK: s_lshl_b32 [[APERTURE:s[0-9]+]], [[GETREG]], 16
+; CHECK: v_mov_b32_e32 v0, 0
+; CHECK: v_mov_b32_e32 v1, [[APERTURE]]
 ; CHECK-NOT: v0
 ; CHECK-NOT: v1
 define void @cast_lds_gv() {
@@ -72,4 +72,4 @@ define void @cast_constant_lds_other_gv() {
 define void @cast_constant_private_other_gv() {
   call void @flat_user(i8* addrspacecast (i8 addrspace(5)* inttoptr (i32 123 to i8 addrspace(5)*) to i8*))
   ret void
-}
\ No newline at end of file
+}

diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
index cee75918cd37a..e7b1002d712d5 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
@@ -8,8 +8,8 @@ define void @no_free_vgprs_at_agpr_copy(float %v0, float %v1) #0 {
 ; GFX908-LABEL: no_free_vgprs_at_agpr_copy:
 ; GFX908:       ; %bb.0:
 ; GFX908-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX908-NEXT:    v_mov_b32_e32 v33, v0
-; GFX908-NEXT:    v_mov_b32_e32 v34, v1
+; GFX908-NEXT:    v_mov_b32_e32 v33, v1
+; GFX908-NEXT:    v_mov_b32_e32 v34, v0
 ; GFX908-NEXT:    ;;#ASMSTART
 ; GFX908-NEXT:    ; def v[0:31] a[0:15]
 ; GFX908-NEXT:    ;;#ASMEND
@@ -62,7 +62,7 @@ define void @no_free_vgprs_at_agpr_copy(float %v0, float %v1) #0 {
 ; GFX908-NEXT:    s_nop 1
 ; GFX908-NEXT:    v_accvgpr_write_b32 a16, v32
 ; GFX908-NEXT:    s_nop 0
-; GFX908-NEXT:    v_mfma_f32_16x16x1f32 a[0:15], v33, v34, a[16:31]
+; GFX908-NEXT:    v_mfma_f32_16x16x1f32 a[0:15], v34, v33, a[16:31]
 ; GFX908-NEXT:    s_nop 7
 ; GFX908-NEXT:    s_nop 1
 ; GFX908-NEXT:    v_accvgpr_read_b32 v32, a0 ; Reload Reuse
@@ -159,8 +159,8 @@ define void @no_free_vgprs_at_agpr_copy(float %v0, float %v1) #0 {
 ; GFX90A-LABEL: no_free_vgprs_at_agpr_copy:
 ; GFX90A:       ; %bb.0:
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX90A-NEXT:    v_mov_b32_e32 v32, v0
-; GFX90A-NEXT:    v_mov_b32_e32 v33, v1
+; GFX90A-NEXT:    v_mov_b32_e32 v33, v0
+; GFX90A-NEXT:    v_mov_b32_e32 v32, v1
 ; GFX90A-NEXT:    ;;#ASMSTART
 ; GFX90A-NEXT:    ; def v[0:31] a[0:15]
 ; GFX90A-NEXT:    ;;#ASMEND
@@ -181,7 +181,7 @@ define void @no_free_vgprs_at_agpr_copy(float %v0, float %v1) #0 {
 ; GFX90A-NEXT:    v_accvgpr_mov_b32 a17, a1
 ; GFX90A-NEXT:    v_accvgpr_mov_b32 a16, a0
 ; GFX90A-NEXT:    s_nop 1
-; GFX90A-NEXT:    v_mfma_f32_16x16x1f32 a[0:15], v32, v33, a[16:31]
+; GFX90A-NEXT:    v_mfma_f32_16x16x1f32 a[0:15], v33, v32, a[16:31]
 ; GFX90A-NEXT:    s_nop 7
 ; GFX90A-NEXT:    s_nop 2
 ; GFX90A-NEXT:    buffer_store_dword a0, off, s[0:3], s32 ; 4-byte Folded Spill

diff --git a/llvm/test/CodeGen/AMDGPU/amdpal-callable.ll b/llvm/test/CodeGen/AMDGPU/amdpal-callable.ll
index a64098cf5aea4..7ef295cf5e995 100644
--- a/llvm/test/CodeGen/AMDGPU/amdpal-callable.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdpal-callable.ll
@@ -172,9 +172,9 @@ attributes #0 = { nounwind }
 ; GCN-NEXT:        .vgpr_count:     0x1{{$}}
 ; GCN-NEXT:      no_stack_call:
 ; GCN-NEXT:        .lds_size:       0{{$}}
-; GCN-NEXT:        .sgpr_count:     0x26{{$}}
-; GCN-NEXT:        .stack_frame_size_in_bytes: 0{{$}}
-; GCN-NEXT:        .vgpr_count:     0x2{{$}}
+; GCN-NEXT:        .sgpr_count:     0x24{{$}}
+; GCN-NEXT:        .stack_frame_size_in_bytes: 0x10{{$}}
+; GCN-NEXT:        .vgpr_count:     0x3{{$}}
 ; GCN-NEXT:      no_stack_extern_call:
 ; GCN-NEXT:        .lds_size:       0{{$}}
 ; GFX8-NEXT:        .sgpr_count:     0x28{{$}}
@@ -210,9 +210,9 @@ attributes #0 = { nounwind }
 ; GCN-NEXT:        .vgpr_count:     0x2{{$}}
 ; GCN-NEXT:      simple_stack_call:
 ; GCN-NEXT:        .lds_size:       0{{$}}
-; GCN-NEXT:        .sgpr_count:     0x26{{$}}
+; GCN-NEXT:        .sgpr_count:     0x24{{$}}
 ; GCN-NEXT:        .stack_frame_size_in_bytes: 0x20{{$}}
-; GCN-NEXT:        .vgpr_count:     0x3{{$}}
+; GCN-NEXT:        .vgpr_count:     0x4{{$}}
 ; GCN-NEXT:      simple_stack_extern_call:
 ; GCN-NEXT:        .lds_size:       0{{$}}
 ; GFX8-NEXT:        .sgpr_count:     0x28{{$}}

diff --git a/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll b/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
index 8ea4dbed7866f..0f86d0ee03607 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relax-spill.ll
@@ -903,80 +903,80 @@ define void @spill_func(i32 addrspace(1)* %arg) #0 {
 ; CHECK-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
 ; CHECK-NEXT:    s_waitcnt expcnt(1)
-; CHECK-NEXT:    v_writelane_b32 v0, s33, 0
-; CHECK-NEXT:    v_writelane_b32 v0, s34, 1
-; CHECK-NEXT:    v_writelane_b32 v0, s35, 2
-; CHECK-NEXT:    v_writelane_b32 v0, s36, 3
-; CHECK-NEXT:    v_writelane_b32 v0, s37, 4
-; CHECK-NEXT:    v_writelane_b32 v0, s38, 5
-; CHECK-NEXT:    v_writelane_b32 v0, s39, 6
-; CHECK-NEXT:    v_writelane_b32 v0, s40, 7
-; CHECK-NEXT:    v_writelane_b32 v0, s41, 8
-; CHECK-NEXT:    v_writelane_b32 v0, s42, 9
-; CHECK-NEXT:    v_writelane_b32 v0, s43, 10
-; CHECK-NEXT:    v_writelane_b32 v0, s44, 11
-; CHECK-NEXT:    v_writelane_b32 v0, s45, 12
-; CHECK-NEXT:    v_writelane_b32 v0, s46, 13
-; CHECK-NEXT:    v_writelane_b32 v0, s47, 14
-; CHECK-NEXT:    v_writelane_b32 v0, s48, 15
-; CHECK-NEXT:    v_writelane_b32 v0, s49, 16
-; CHECK-NEXT:    v_writelane_b32 v0, s50, 17
-; CHECK-NEXT:    v_writelane_b32 v0, s51, 18
-; CHECK-NEXT:    v_writelane_b32 v0, s52, 19
-; CHECK-NEXT:    v_writelane_b32 v0, s53, 20
-; CHECK-NEXT:    v_writelane_b32 v0, s54, 21
-; CHECK-NEXT:    v_writelane_b32 v0, s55, 22
-; CHECK-NEXT:    v_writelane_b32 v0, s56, 23
-; CHECK-NEXT:    v_writelane_b32 v0, s57, 24
-; CHECK-NEXT:    v_writelane_b32 v0, s58, 25
-; CHECK-NEXT:    v_writelane_b32 v0, s59, 26
-; CHECK-NEXT:    v_writelane_b32 v0, s60, 27
-; CHECK-NEXT:    v_writelane_b32 v0, s61, 28
-; CHECK-NEXT:    v_writelane_b32 v0, s62, 29
-; CHECK-NEXT:    v_writelane_b32 v0, s63, 30
-; CHECK-NEXT:    v_writelane_b32 v0, s64, 31
-; CHECK-NEXT:    v_writelane_b32 v0, s65, 32
-; CHECK-NEXT:    v_writelane_b32 v0, s66, 33
-; CHECK-NEXT:    v_writelane_b32 v0, s67, 34
-; CHECK-NEXT:    v_writelane_b32 v0, s68, 35
-; CHECK-NEXT:    v_writelane_b32 v0, s69, 36
-; CHECK-NEXT:    v_writelane_b32 v0, s70, 37
-; CHECK-NEXT:    v_writelane_b32 v0, s71, 38
-; CHECK-NEXT:    v_writelane_b32 v0, s72, 39
-; CHECK-NEXT:    v_writelane_b32 v0, s73, 40
-; CHECK-NEXT:    v_writelane_b32 v0, s74, 41
-; CHECK-NEXT:    v_writelane_b32 v0, s75, 42
-; CHECK-NEXT:    v_writelane_b32 v0, s76, 43
-; CHECK-NEXT:    v_writelane_b32 v0, s77, 44
-; CHECK-NEXT:    v_writelane_b32 v0, s78, 45
-; CHECK-NEXT:    v_writelane_b32 v0, s79, 46
-; CHECK-NEXT:    v_writelane_b32 v0, s80, 47
-; CHECK-NEXT:    v_writelane_b32 v0, s81, 48
-; CHECK-NEXT:    v_writelane_b32 v0, s82, 49
-; CHECK-NEXT:    v_writelane_b32 v0, s83, 50
-; CHECK-NEXT:    v_writelane_b32 v0, s84, 51
-; CHECK-NEXT:    v_writelane_b32 v0, s85, 52
-; CHECK-NEXT:    v_writelane_b32 v0, s86, 53
-; CHECK-NEXT:    v_writelane_b32 v0, s87, 54
-; CHECK-NEXT:    v_writelane_b32 v0, s88, 55
-; CHECK-NEXT:    v_writelane_b32 v0, s89, 56
-; CHECK-NEXT:    v_writelane_b32 v0, s90, 57
+; CHECK-NEXT:    v_writelane_b32 v0, s30, 0
+; CHECK-NEXT:    v_writelane_b32 v0, s31, 1
+; CHECK-NEXT:    v_writelane_b32 v0, s33, 2
+; CHECK-NEXT:    v_writelane_b32 v0, s34, 3
+; CHECK-NEXT:    v_writelane_b32 v0, s35, 4
+; CHECK-NEXT:    v_writelane_b32 v0, s36, 5
+; CHECK-NEXT:    v_writelane_b32 v0, s37, 6
+; CHECK-NEXT:    v_writelane_b32 v0, s38, 7
+; CHECK-NEXT:    v_writelane_b32 v0, s39, 8
+; CHECK-NEXT:    v_writelane_b32 v0, s40, 9
+; CHECK-NEXT:    v_writelane_b32 v0, s41, 10
+; CHECK-NEXT:    v_writelane_b32 v0, s42, 11
+; CHECK-NEXT:    v_writelane_b32 v0, s43, 12
+; CHECK-NEXT:    v_writelane_b32 v0, s44, 13
+; CHECK-NEXT:    v_writelane_b32 v0, s45, 14
+; CHECK-NEXT:    v_writelane_b32 v0, s46, 15
+; CHECK-NEXT:    v_writelane_b32 v0, s47, 16
+; CHECK-NEXT:    v_writelane_b32 v0, s48, 17
+; CHECK-NEXT:    v_writelane_b32 v0, s49, 18
+; CHECK-NEXT:    v_writelane_b32 v0, s50, 19
+; CHECK-NEXT:    v_writelane_b32 v0, s51, 20
+; CHECK-NEXT:    v_writelane_b32 v0, s52, 21
+; CHECK-NEXT:    v_writelane_b32 v0, s53, 22
+; CHECK-NEXT:    v_writelane_b32 v0, s54, 23
+; CHECK-NEXT:    v_writelane_b32 v0, s55, 24
+; CHECK-NEXT:    v_writelane_b32 v0, s56, 25
+; CHECK-NEXT:    v_writelane_b32 v0, s57, 26
+; CHECK-NEXT:    v_writelane_b32 v0, s58, 27
+; CHECK-NEXT:    v_writelane_b32 v0, s59, 28
+; CHECK-NEXT:    v_writelane_b32 v0, s60, 29
+; CHECK-NEXT:    v_writelane_b32 v0, s61, 30
+; CHECK-NEXT:    v_writelane_b32 v0, s62, 31
+; CHECK-NEXT:    v_writelane_b32 v0, s63, 32
+; CHECK-NEXT:    v_writelane_b32 v0, s64, 33
+; CHECK-NEXT:    v_writelane_b32 v0, s65, 34
+; CHECK-NEXT:    v_writelane_b32 v0, s66, 35
+; CHECK-NEXT:    v_writelane_b32 v0, s67, 36
+; CHECK-NEXT:    v_writelane_b32 v0, s68, 37
+; CHECK-NEXT:    v_writelane_b32 v0, s69, 38
+; CHECK-NEXT:    v_writelane_b32 v0, s70, 39
+; CHECK-NEXT:    v_writelane_b32 v0, s71, 40
+; CHECK-NEXT:    v_writelane_b32 v0, s72, 41
+; CHECK-NEXT:    v_writelane_b32 v0, s73, 42
+; CHECK-NEXT:    v_writelane_b32 v0, s74, 43
+; CHECK-NEXT:    v_writelane_b32 v0, s75, 44
+; CHECK-NEXT:    v_writelane_b32 v0, s76, 45
+; CHECK-NEXT:    v_writelane_b32 v0, s77, 46
+; CHECK-NEXT:    v_writelane_b32 v0, s78, 47
+; CHECK-NEXT:    v_writelane_b32 v0, s79, 48
+; CHECK-NEXT:    v_writelane_b32 v0, s80, 49
+; CHECK-NEXT:    v_writelane_b32 v0, s81, 50
+; CHECK-NEXT:    v_writelane_b32 v0, s82, 51
+; CHECK-NEXT:    v_writelane_b32 v0, s83, 52
+; CHECK-NEXT:    v_writelane_b32 v0, s84, 53
+; CHECK-NEXT:    v_writelane_b32 v0, s85, 54
+; CHECK-NEXT:    v_writelane_b32 v0, s86, 55
+; CHECK-NEXT:    v_writelane_b32 v0, s87, 56
+; CHECK-NEXT:    v_writelane_b32 v0, s88, 57
 ; CHECK-NEXT:    s_waitcnt expcnt(0)
-; CHECK-NEXT:    v_writelane_b32 v1, s97, 0
-; CHECK-NEXT:    v_writelane_b32 v0, s91, 58
-; CHECK-NEXT:    v_writelane_b32 v1, s98, 1
-; CHECK-NEXT:    v_writelane_b32 v0, s92, 59
-; CHECK-NEXT:    v_writelane_b32 v1, s99, 2
-; CHECK-NEXT:    v_writelane_b32 v0, s93, 60
-; CHECK-NEXT:    v_writelane_b32 v1, s100, 3
-; CHECK-NEXT:    v_writelane_b32 v0, s94, 61
-; CHECK-NEXT:    v_writelane_b32 v1, s101, 4
-; CHECK-NEXT:    v_writelane_b32 v0, s95, 62
-; CHECK-NEXT:    v_writelane_b32 v1, s30, 5
-; CHECK-NEXT:    s_mov_b32 s29, s12
-; CHECK-NEXT:    v_writelane_b32 v0, s96, 63
-; CHECK-NEXT:    v_writelane_b32 v1, s31, 6
-; CHECK-NEXT:    s_cmp_eq_u32 s29, 0
+; CHECK-NEXT:    v_writelane_b32 v1, s95, 0
+; CHECK-NEXT:    v_writelane_b32 v0, s89, 58
+; CHECK-NEXT:    v_writelane_b32 v1, s96, 1
+; CHECK-NEXT:    v_writelane_b32 v0, s90, 59
+; CHECK-NEXT:    v_writelane_b32 v1, s97, 2
+; CHECK-NEXT:    v_writelane_b32 v0, s91, 60
+; CHECK-NEXT:    v_writelane_b32 v1, s98, 3
+; CHECK-NEXT:    v_writelane_b32 v0, s92, 61
+; CHECK-NEXT:    v_writelane_b32 v1, s99, 4
+; CHECK-NEXT:    v_writelane_b32 v0, s93, 62
+; CHECK-NEXT:    v_writelane_b32 v1, s100, 5
+; CHECK-NEXT:    s_mov_b32 s31, s12
+; CHECK-NEXT:    v_writelane_b32 v0, s94, 63
+; CHECK-NEXT:    v_writelane_b32 v1, s101, 6
+; CHECK-NEXT:    s_cmp_eq_u32 s31, 0
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    s_mov_b32 s0, 0
 ; CHECK-NEXT:    ;;#ASMEND
@@ -1336,7 +1336,6 @@ define void @spill_func(i32 addrspace(1)* %arg) #0 {
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    ; reg use s5
 ; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    v_readlane_b32 s4, v1, 5
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    ; reg use s6
 ; CHECK-NEXT:    ;;#ASMEND
@@ -1631,82 +1630,83 @@ define void @spill_func(i32 addrspace(1)* %arg) #0 {
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    ; reg use vcc_hi
 ; CHECK-NEXT:    ;;#ASMEND
-; CHECK-NEXT:    v_readlane_b32 s5, v1, 6
-; CHECK-NEXT:    v_readlane_b32 s101, v1, 4
-; CHECK-NEXT:    v_readlane_b32 s100, v1, 3
-; CHECK-NEXT:    v_readlane_b32 s99, v1, 2
-; CHECK-NEXT:    v_readlane_b32 s98, v1, 1
-; CHECK-NEXT:    v_readlane_b32 s97, v1, 0
-; CHECK-NEXT:    v_readlane_b32 s96, v0, 63
-; CHECK-NEXT:    v_readlane_b32 s95, v0, 62
-; CHECK-NEXT:    v_readlane_b32 s94, v0, 61
-; CHECK-NEXT:    v_readlane_b32 s93, v0, 60
-; CHECK-NEXT:    v_readlane_b32 s92, v0, 59
-; CHECK-NEXT:    v_readlane_b32 s91, v0, 58
-; CHECK-NEXT:    v_readlane_b32 s90, v0, 57
-; CHECK-NEXT:    v_readlane_b32 s89, v0, 56
-; CHECK-NEXT:    v_readlane_b32 s88, v0, 55
-; CHECK-NEXT:    v_readlane_b32 s87, v0, 54
-; CHECK-NEXT:    v_readlane_b32 s86, v0, 53
-; CHECK-NEXT:    v_readlane_b32 s85, v0, 52
-; CHECK-NEXT:    v_readlane_b32 s84, v0, 51
-; CHECK-NEXT:    v_readlane_b32 s83, v0, 50
-; CHECK-NEXT:    v_readlane_b32 s82, v0, 49
-; CHECK-NEXT:    v_readlane_b32 s81, v0, 48
-; CHECK-NEXT:    v_readlane_b32 s80, v0, 47
-; CHECK-NEXT:    v_readlane_b32 s79, v0, 46
-; CHECK-NEXT:    v_readlane_b32 s78, v0, 45
-; CHECK-NEXT:    v_readlane_b32 s77, v0, 44
-; CHECK-NEXT:    v_readlane_b32 s76, v0, 43
-; CHECK-NEXT:    v_readlane_b32 s75, v0, 42
-; CHECK-NEXT:    v_readlane_b32 s74, v0, 41
-; CHECK-NEXT:    v_readlane_b32 s73, v0, 40
-; CHECK-NEXT:    v_readlane_b32 s72, v0, 39
-; CHECK-NEXT:    v_readlane_b32 s71, v0, 38
-; CHECK-NEXT:    v_readlane_b32 s70, v0, 37
-; CHECK-NEXT:    v_readlane_b32 s69, v0, 36
-; CHECK-NEXT:    v_readlane_b32 s68, v0, 35
-; CHECK-NEXT:    v_readlane_b32 s67, v0, 34
-; CHECK-NEXT:    v_readlane_b32 s66, v0, 33
-; CHECK-NEXT:    v_readlane_b32 s65, v0, 32
-; CHECK-NEXT:    v_readlane_b32 s64, v0, 31
-; CHECK-NEXT:    v_readlane_b32 s63, v0, 30
-; CHECK-NEXT:    v_readlane_b32 s62, v0, 29
-; CHECK-NEXT:    v_readlane_b32 s61, v0, 28
-; CHECK-NEXT:    v_readlane_b32 s60, v0, 27
-; CHECK-NEXT:    v_readlane_b32 s59, v0, 26
-; CHECK-NEXT:    v_readlane_b32 s58, v0, 25
-; CHECK-NEXT:    v_readlane_b32 s57, v0, 24
-; CHECK-NEXT:    v_readlane_b32 s56, v0, 23
-; CHECK-NEXT:    v_readlane_b32 s55, v0, 22
-; CHECK-NEXT:    v_readlane_b32 s54, v0, 21
-; CHECK-NEXT:    v_readlane_b32 s53, v0, 20
-; CHECK-NEXT:    v_readlane_b32 s52, v0, 19
-; CHECK-NEXT:    v_readlane_b32 s51, v0, 18
-; CHECK-NEXT:    v_readlane_b32 s50, v0, 17
-; CHECK-NEXT:    v_readlane_b32 s49, v0, 16
-; CHECK-NEXT:    v_readlane_b32 s48, v0, 15
-; CHECK-NEXT:    v_readlane_b32 s47, v0, 14
-; CHECK-NEXT:    v_readlane_b32 s46, v0, 13
-; CHECK-NEXT:    v_readlane_b32 s45, v0, 12
-; CHECK-NEXT:    v_readlane_b32 s44, v0, 11
-; CHECK-NEXT:    v_readlane_b32 s43, v0, 10
-; CHECK-NEXT:    v_readlane_b32 s42, v0, 9
-; CHECK-NEXT:    v_readlane_b32 s41, v0, 8
-; CHECK-NEXT:    v_readlane_b32 s40, v0, 7
-; CHECK-NEXT:    v_readlane_b32 s39, v0, 6
-; CHECK-NEXT:    v_readlane_b32 s38, v0, 5
-; CHECK-NEXT:    v_readlane_b32 s37, v0, 4
-; CHECK-NEXT:    v_readlane_b32 s36, v0, 3
-; CHECK-NEXT:    v_readlane_b32 s35, v0, 2
-; CHECK-NEXT:    v_readlane_b32 s34, v0, 1
-; CHECK-NEXT:    v_readlane_b32 s33, v0, 0
-; CHECK-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; CHECK-NEXT:    v_readlane_b32 s101, v1, 6
+; CHECK-NEXT:    v_readlane_b32 s100, v1, 5
+; CHECK-NEXT:    v_readlane_b32 s99, v1, 4
+; CHECK-NEXT:    v_readlane_b32 s98, v1, 3
+; CHECK-NEXT:    v_readlane_b32 s97, v1, 2
+; CHECK-NEXT:    v_readlane_b32 s96, v1, 1
+; CHECK-NEXT:    v_readlane_b32 s95, v1, 0
+; CHECK-NEXT:    v_readlane_b32 s94, v0, 63
+; CHECK-NEXT:    v_readlane_b32 s93, v0, 62
+; CHECK-NEXT:    v_readlane_b32 s92, v0, 61
+; CHECK-NEXT:    v_readlane_b32 s91, v0, 60
+; CHECK-NEXT:    v_readlane_b32 s90, v0, 59
+; CHECK-NEXT:    v_readlane_b32 s89, v0, 58
+; CHECK-NEXT:    v_readlane_b32 s88, v0, 57
+; CHECK-NEXT:    v_readlane_b32 s87, v0, 56
+; CHECK-NEXT:    v_readlane_b32 s86, v0, 55
+; CHECK-NEXT:    v_readlane_b32 s85, v0, 54
+; CHECK-NEXT:    v_readlane_b32 s84, v0, 53
+; CHECK-NEXT:    v_readlane_b32 s83, v0, 52
+; CHECK-NEXT:    v_readlane_b32 s82, v0, 51
+; CHECK-NEXT:    v_readlane_b32 s81, v0, 50
+; CHECK-NEXT:    v_readlane_b32 s80, v0, 49
+; CHECK-NEXT:    v_readlane_b32 s79, v0, 48
+; CHECK-NEXT:    v_readlane_b32 s78, v0, 47
+; CHECK-NEXT:    v_readlane_b32 s77, v0, 46
+; CHECK-NEXT:    v_readlane_b32 s76, v0, 45
+; CHECK-NEXT:    v_readlane_b32 s75, v0, 44
+; CHECK-NEXT:    v_readlane_b32 s74, v0, 43
+; CHECK-NEXT:    v_readlane_b32 s73, v0, 42
+; CHECK-NEXT:    v_readlane_b32 s72, v0, 41
+; CHECK-NEXT:    v_readlane_b32 s71, v0, 40
+; CHECK-NEXT:    v_readlane_b32 s70, v0, 39
+; CHECK-NEXT:    v_readlane_b32 s69, v0, 38
+; CHECK-NEXT:    v_readlane_b32 s68, v0, 37
+; CHECK-NEXT:    v_readlane_b32 s67, v0, 36
+; CHECK-NEXT:    v_readlane_b32 s66, v0, 35
+; CHECK-NEXT:    v_readlane_b32 s65, v0, 34
+; CHECK-NEXT:    v_readlane_b32 s64, v0, 33
+; CHECK-NEXT:    v_readlane_b32 s63, v0, 32
+; CHECK-NEXT:    v_readlane_b32 s62, v0, 31
+; CHECK-NEXT:    v_readlane_b32 s61, v0, 30
+; CHECK-NEXT:    v_readlane_b32 s60, v0, 29
+; CHECK-NEXT:    v_readlane_b32 s59, v0, 28
+; CHECK-NEXT:    v_readlane_b32 s58, v0, 27
+; CHECK-NEXT:    v_readlane_b32 s57, v0, 26
+; CHECK-NEXT:    v_readlane_b32 s56, v0, 25
+; CHECK-NEXT:    v_readlane_b32 s55, v0, 24
+; CHECK-NEXT:    v_readlane_b32 s54, v0, 23
+; CHECK-NEXT:    v_readlane_b32 s53, v0, 22
+; CHECK-NEXT:    v_readlane_b32 s52, v0, 21
+; CHECK-NEXT:    v_readlane_b32 s51, v0, 20
+; CHECK-NEXT:    v_readlane_b32 s50, v0, 19
+; CHECK-NEXT:    v_readlane_b32 s49, v0, 18
+; CHECK-NEXT:    v_readlane_b32 s48, v0, 17
+; CHECK-NEXT:    v_readlane_b32 s47, v0, 16
+; CHECK-NEXT:    v_readlane_b32 s46, v0, 15
+; CHECK-NEXT:    v_readlane_b32 s45, v0, 14
+; CHECK-NEXT:    v_readlane_b32 s44, v0, 13
+; CHECK-NEXT:    v_readlane_b32 s43, v0, 12
+; CHECK-NEXT:    v_readlane_b32 s42, v0, 11
+; CHECK-NEXT:    v_readlane_b32 s41, v0, 10
+; CHECK-NEXT:    v_readlane_b32 s40, v0, 9
+; CHECK-NEXT:    v_readlane_b32 s39, v0, 8
+; CHECK-NEXT:    v_readlane_b32 s38, v0, 7
+; CHECK-NEXT:    v_readlane_b32 s37, v0, 6
+; CHECK-NEXT:    v_readlane_b32 s36, v0, 5
+; CHECK-NEXT:    v_readlane_b32 s35, v0, 4
+; CHECK-NEXT:    v_readlane_b32 s34, v0, 3
+; CHECK-NEXT:    v_readlane_b32 s33, v0, 2
+; CHECK-NEXT:    v_readlane_b32 s31, v0, 1
+; CHECK-NEXT:    v_readlane_b32 s30, v0, 0
+; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; CHECK-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
 ; CHECK-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; CHECK-NEXT:    s_mov_b64 exec, s[6:7]
+; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
-; CHECK-NEXT:    s_setpc_b64 s[4:5]
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %cnd = tail call i32 @llvm.amdgcn.workgroup.id.x() #0
   %sgpr0 = tail call i32 asm sideeffect "s_mov_b32 s0, 0", "={s0}"() #0

diff --git a/llvm/test/CodeGen/AMDGPU/call-graph-register-usage.ll b/llvm/test/CodeGen/AMDGPU/call-graph-register-usage.ll
index e91d62c4c3f2e..1a9ec6c31555f 100644
--- a/llvm/test/CodeGen/AMDGPU/call-graph-register-usage.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-graph-register-usage.ll
@@ -17,9 +17,10 @@ define void @use_vcc() #1 {
 ; GCN: v_writelane_b32 v40, s30, 0
 ; GCN: v_writelane_b32 v40, s31, 1
 ; GCN: s_swappc_b64
-; GCN: v_readlane_b32 s4, v40, 0
-; GCN: v_readlane_b32 s5, v40, 1
+; GCN: v_readlane_b32 s31, v40, 1
+; GCN: v_readlane_b32 s30, v40, 0
 ; GCN: v_readlane_b32 s33, v40, 2
+; GCN: s_setpc_b64 s[30:31]
 ; GCN: ; NumSgprs: 36
 ; GCN: ; NumVgprs: 41
 define void @indirect_use_vcc() #1 {

diff --git a/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll b/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
index 162fdbba4bfab..595fe7bca1064 100644
--- a/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
@@ -25,26 +25,26 @@ define amdgpu_kernel void @test_kernel_call_external_void_func_void_clobber_s30_
 ; MUBUF:   buffer_store_dword
 ; FLATSCR: scratch_store_dword
 ; GCN: v_writelane_b32 v40, s33, 4
-; GCN: v_writelane_b32 v40, s34, 0
-; GCN: v_writelane_b32 v40, s35, 1
-; GCN: v_writelane_b32 v40, s30, 2
-; GCN: v_writelane_b32 v40, s31, 3
+; GCN: v_writelane_b32 v40, s30, 0
+; GCN: v_writelane_b32 v40, s31, 1
+; GCN: v_writelane_b32 v40, s34, 2
+; GCN: v_writelane_b32 v40, s35, 3
 
 ; GCN: s_swappc_b64
 ; GCN-NEXT: ;;#ASMSTART
 ; GCN-NEXT: ;;#ASMEND
 ; GCN-NEXT: s_swappc_b64
-; MUBUF-DAG:   v_readlane_b32 s4, v40, 2
-; MUBUF-DAG:   v_readlane_b32 s5, v40, 3
-; FLATSCR-DAG: v_readlane_b32 s0, v40, 2
-; FLATSCR-DAG: v_readlane_b32 s1, v40, 3
-; GCN: v_readlane_b32 s35, v40, 1
-; GCN: v_readlane_b32 s34, v40, 0
+; GCN: v_readlane_b32 s35, v40, 3
+; GCN: v_readlane_b32 s34, v40, 2
+; MUBUF-DAG:   v_readlane_b32 s31, v40, 1
+; MUBUF-DAG:   v_readlane_b32 s30, v40, 0
+; FLATSCR-DAG: v_readlane_b32 s31, v40, 1
+; FLATSCR-DAG: v_readlane_b32 s30, v40, 0
 
 ; GCN: v_readlane_b32 s33, v40, 4
 ; MUBUF:   buffer_load_dword
 ; FLATSCR: scratch_load_dword
-; GCN: s_setpc_b64
+; GCN: s_setpc_b64 s[30:31]
 define void @test_func_call_external_void_func_void_clobber_s30_s31_call_external_void_func_void() #0 {
   call void @external_void_func_void()
   call void asm sideeffect "", ""() #0
@@ -74,11 +74,14 @@ define void @test_func_call_external_void_funcx2() #0 {
 
 ; GCN-LABEL: {{^}}void_func_void_clobber_s30_s31:
 ; GCN: s_waitcnt
-; GCN-NEXT: s_mov_b64 [[SAVEPC:s\[[0-9]+:[0-9]+\]]], s[30:31]
+; GCN: v_writelane_b32 v0, s30, 0
+; GCN: v_writelane_b32 v0, s31, 1
 ; GCN-NEXT: #ASMSTART
 ; GCN: ; clobber
 ; GCN-NEXT: #ASMEND
-; GCN-NEXT: s_setpc_b64 [[SAVEPC]]
+; GCN: v_readlane_b32 s31, v0, 1
+; GCN: v_readlane_b32 s30, v0, 0
+; GCN: s_setpc_b64 s[30:31]
 define void @void_func_void_clobber_s30_s31() #2 {
   call void asm sideeffect "; clobber", "~{s[30:31]}"() #0
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll b/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
index 918ef1c6fd42f..6439b0dcfc9a6 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
@@ -102,10 +102,8 @@ define void @callee_with_stack_no_fp_elim_non_leaf() #2 {
 
 ; GCN: s_swappc_b64
 
-; MUBUF-DAG: v_readlane_b32 s5, [[CSR_VGPR]]
-; MUBUF-DAG: v_readlane_b32 s4, [[CSR_VGPR]]
-; FLATSCR-DAG: v_readlane_b32 s0, [[CSR_VGPR]]
-; FLATSCR-DAG: v_readlane_b32 s1, [[CSR_VGPR]]
+; GCN-DAG: v_readlane_b32 s30, [[CSR_VGPR]]
+; GCN-DAG: v_readlane_b32 s31, [[CSR_VGPR]]
 
 ; MUBUF:    s_addk_i32 s32, 0xfc00{{$}}
 ; FLATSCR:  s_add_i32 s32, s32, -16{{$}}
@@ -116,7 +114,7 @@ define void @callee_with_stack_no_fp_elim_non_leaf() #2 {
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
 
-; GCN-NEXT: s_setpc_b64
+; GCN-NEXT: s_setpc_b64 s[30:31]
 define void @callee_with_stack_and_call() #0 {
   %alloca = alloca i32, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca
@@ -144,10 +142,8 @@ define void @callee_with_stack_and_call() #0 {
 ; GCN-DAG: v_writelane_b32 [[CSR_VGPR]], s31, 1
 ; GCN: s_swappc_b64
 
-; MUBUF-DAG: v_readlane_b32 s4, v40, 0
-; MUBUF-DAG: v_readlane_b32 s5, v40, 1
-; FLATSCR-DAG: v_readlane_b32 s0, v40, 0
-; FLATSCR-DAG: v_readlane_b32 s1, v40, 1
+; GCN-DAG: v_readlane_b32 s30, [[CSR_VGPR]], 0
+; GCN-DAG: v_readlane_b32 s31, [[CSR_VGPR]], 1
 
 ; MUBUF:   s_addk_i32 s32, 0xfc00
 ; FLATSCR: s_add_i32 s32, s32, -16
@@ -157,7 +153,7 @@ define void @callee_with_stack_and_call() #0 {
 ; FLATSCR-NEXT: scratch_load_dword [[CSR_VGPR]], off, s32 ; 4-byte Folded Reload
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: s_setpc_b64
+; GCN-NEXT: s_setpc_b64 s[30:31]
 define void @callee_no_stack_with_call() #0 {
   call void @external_void_func_void()
   ret void
@@ -393,31 +389,28 @@ define void @realign_stack_no_fp_elim() #1 {
 ; MUBUF-NEXT:   buffer_store_dword [[CSR_VGPR:v[0-9]+]], off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; FLATSCR-NEXT: scratch_store_dword off, [[CSR_VGPR:v[0-9]+]], s32 offset:4 ; 4-byte Folded Spill
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
-; GCN-NEXT: v_writelane_b32 v0, s33, 2
+; GCN-NEXT: v_writelane_b32 [[CSR_VGPR]], s33, 2
 ; GCN-NEXT: s_mov_b32 s33, s32
-; GCN-NEXT: v_writelane_b32 v0, s30, 0
+; GCN: v_writelane_b32 [[CSR_VGPR]], s30, 0
 ; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
-; GCN: v_writelane_b32 v0, s31, 1
+; MUBUF:        s_addk_i32 s32, 0x300
+; FLATSCR:      s_add_i32 s32, s32, 12
+; GCN: v_writelane_b32 [[CSR_VGPR]], s31, 1
 ; MUBUF:   buffer_store_dword [[ZERO]], off, s[0:3], s33{{$}}
 ; FLATSCR: scratch_store_dword off, [[ZERO]], s33{{$}}
 ; GCN-NEXT:     s_waitcnt vmcnt(0)
 ; GCN: ;;#ASMSTART
-; MUBUF:        s_addk_i32 s32, 0x300
-; MUBUF-NEXT:   v_readlane_b32 s4, v0, 0
-; MUBUF-NEXT:   v_readlane_b32 s5, v0, 1
-; FLATSCR:      s_add_i32 s32, s32, 12
-; FLATSCR-NEXT: v_readlane_b32 s0, v0, 0
-; FLATSCR-NEXT: v_readlane_b32 s1, v0, 1
-; MUBUF-NEXT:   s_addk_i32 s32, 0xfd00
-; FLATSCR-NEXT: s_add_i32 s32, s32, -12
-; GCN-NEXT:     v_readlane_b32 s33, v0, 2
+; GCN:     v_readlane_b32 s31, [[CSR_VGPR]], 1
+; GCN:     v_readlane_b32 s30, [[CSR_VGPR]], 0
+; MUBUF:   s_addk_i32 s32, 0xfd00
+; FLATSCR: s_add_i32 s32, s32, -12
+; GCN-NEXT:     v_readlane_b32 s33, [[CSR_VGPR]], 2
 ; GCN-NEXT: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
 ; MUBUF-NEXT:   buffer_load_dword [[CSR_VGPR]], off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
 ; FLATSCR-NEXT: scratch_load_dword [[CSR_VGPR]], off, s32 offset:4 ; 4-byte Folded Reload
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; MUBUF-NEXT:   s_setpc_b64 s[4:5]
-; FLATSCR-NEXT: s_setpc_b64 s[0:1]
+; GCN-NEXT: s_setpc_b64 s[30:31]
 define void @no_unused_non_csr_sgpr_for_fp() #1 {
   %alloca = alloca i32, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca
@@ -441,28 +434,22 @@ define void @no_unused_non_csr_sgpr_for_fp() #1 {
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
 ; GCN-NEXT: v_writelane_b32 [[CSR_VGPR]], s33, 2
 ; GCN-NEXT: s_mov_b32 s33, s32
-; GCN-NEXT: v_writelane_b32 [[CSR_VGPR]], s30, 0
+; MUBUF:       s_addk_i32 s32, 0x300{{$}}
+; FLATSCR:     s_add_i32 s32, s32, 12{{$}}
 
-; GCN-DAG: v_writelane_b32 [[CSR_VGPR]], s31, 1
 ; MUBUF-DAG:   buffer_store_dword
 ; FLATSCR-DAG: scratch_store_dword
-; MUBUF:       s_addk_i32 s32, 0x300{{$}}
-; FLATSCR:     s_add_i32 s32, s32, 12{{$}}
 
-; MUBUF:        v_readlane_b32 s4, [[CSR_VGPR]], 0
-; FLATSCR:      v_readlane_b32 s0, [[CSR_VGPR]], 0
 ; GCN: ;;#ASMSTART
-; MUBUF:        v_readlane_b32 s5, [[CSR_VGPR]], 1
-; FLATSCR:      v_readlane_b32 s1, [[CSR_VGPR]], 1
-; MUBUF-NEXT:   s_addk_i32 s32, 0xfd00{{$}}
-; FLATSCR-NEXT: s_add_i32 s32, s32, -12{{$}}
+; MUBUF:   s_addk_i32 s32, 0xfd00{{$}}
+; FLATSCR: s_add_i32 s32, s32, -12{{$}}
 ; GCN-NEXT: v_readlane_b32 s33, [[CSR_VGPR]], 2
 ; GCN-NEXT: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
 ; MUBUF-NEXT:   buffer_load_dword [[CSR_VGPR]], off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
 ; FLATSCR-NEXT: scratch_load_dword [[CSR_VGPR]], off, s32 offset:4 ; 4-byte Folded Reload
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: s_setpc_b64
+; GCN-NEXT: s_setpc_b64 s[30:31]
 define void @no_unused_non_csr_sgpr_for_fp_no_scratch_vgpr() #1 {
   %alloca = alloca i32, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca
@@ -494,21 +481,15 @@ define void @no_unused_non_csr_sgpr_for_fp_no_scratch_vgpr() #1 {
 ; FLATSCR-NEXT: scratch_store_dword off, [[CSR_VGPR:v[0-9]+]], [[SCRATCH_SGPR]] ; 4-byte Folded Spill
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
 ; GCN-NEXT: v_writelane_b32 [[CSR_VGPR]], s33, 2
-; GCN-DAG:  v_writelane_b32 [[CSR_VGPR]], s30, 0
 ; GCN-DAG:  s_mov_b32 s33, s32
-; GCN-DAG:  v_writelane_b32 [[CSR_VGPR]], s31, 1
 ; MUBUF-DAG:   s_add_i32 s32, s32, 0x40300{{$}}
 ; FLATSCR-DAG: s_addk_i32 s32, 0x100c{{$}}
 ; MUBUF-DAG:   buffer_store_dword
 ; FLATSCR-DAG: scratch_store_dword
 
-; MUBUF:   v_readlane_b32 s4, [[CSR_VGPR]], 0
-; FLATSCR: v_readlane_b32 s0, [[CSR_VGPR]], 0
 ; GCN: ;;#ASMSTART
-; MUBUF:   v_readlane_b32 s5, [[CSR_VGPR]], 1
-; FLATSCR: v_readlane_b32 s1, [[CSR_VGPR]], 1
-; MUBUF-NEXT:   s_add_i32 s32, s32, 0xfffbfd00{{$}}
-; FLATSCR-NEXT: s_addk_i32 s32, 0xeff4{{$}}
+; MUBUF:   s_add_i32 s32, s32, 0xfffbfd00{{$}}
+; FLATSCR: s_addk_i32 s32, 0xeff4{{$}}
 ; GCN-NEXT: v_readlane_b32 s33, [[CSR_VGPR]], 2
 ; GCN-NEXT: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
 ; MUBUF-NEXT: s_add_i32 [[SCRATCH_SGPR:s[0-9]+]], s32, 0x40100
@@ -517,7 +498,7 @@ define void @no_unused_non_csr_sgpr_for_fp_no_scratch_vgpr() #1 {
 ; FLATSCR-NEXT: scratch_load_dword [[CSR_VGPR]], off, [[SCRATCH_SGPR]] ; 4-byte Folded Reload
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: s_setpc_b64
+; GCN-NEXT: s_setpc_b64 s[30:31]
 define void @scratch_reg_needed_mubuf_offset([4096 x i8] addrspace(5)* byval([4096 x i8]) align 4 %arg) #1 {
   %alloca = alloca i32, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca
@@ -549,7 +530,7 @@ define internal void @local_empty_func() #0 {
 ; An FP is needed, despite not needing any spills
 ; TODO: Could see callee does not use stack and omit FP.
 ; GCN-LABEL: {{^}}ipra_call_with_stack:
-; GCN: s_mov_b32 [[FP_COPY:s[0-9]+]], s33
+; GCN: v_writelane_b32 v0, s33, 2
 ; GCN: s_mov_b32 s33, s32
 ; MUBUF:   s_addk_i32 s32, 0x400
 ; FLATSCR: s_add_i32 s32, s32, 16
@@ -558,7 +539,7 @@ define internal void @local_empty_func() #0 {
 ; GCN:     s_swappc_b64
 ; MUBUF:   s_addk_i32 s32, 0xfc00
 ; FLATSCR: s_add_i32 s32, s32, -16
-; GCN: s_mov_b32 s33, [[FP_COPY:s[0-9]+]]
+; GCN: v_readlane_b32 s33, v0, 2
 define void @ipra_call_with_stack() #0 {
   %alloca = alloca i32, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca

diff --git a/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll b/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll
index 6a8d26c441a16..3f0c627641ac9 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-special-input-sgprs-fixed-abi.ll
@@ -317,7 +317,7 @@ define amdgpu_kernel void @kern_indirect_use_workgroup_id_yz() #1 {
 ; GCN-NOT: s12
 ; GCN-NOT: s13
 ; GCN-NOT: s14
-; GCN: v_readlane_b32 s4, v40, 0
+; GCN: v_readlane_b32 s30, v40, 0
 define hidden void @func_indirect_use_workgroup_id_x() #1 {
   call void @use_workgroup_id_x()
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
index 82dddcb6a1481..29fc098899ee5 100644
--- a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
@@ -39,15 +39,15 @@ define float @call_split_type_used_outside_block_v2f32() #0 {
 ; GCN-NEXT:    s_add_u32 s16, s16, func_v2f32@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s17, s17, func_v2f32@rel32@hi+12
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; GCN-NEXT:    v_readlane_b32 s4, v40, 0
-; GCN-NEXT:    v_readlane_b32 s5, v40, 1
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 2
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 bb0:
   %split.ret.type = call <2 x float> @func_v2f32()
   br label %bb1
@@ -73,15 +73,15 @@ define float @call_split_type_used_outside_block_v3f32() #0 {
 ; GCN-NEXT:    s_add_u32 s16, s16, func_v3f32@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s17, s17, func_v3f32@rel32@hi+12
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; GCN-NEXT:    v_readlane_b32 s4, v40, 0
-; GCN-NEXT:    v_readlane_b32 s5, v40, 1
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 2
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 bb0:
   %split.ret.type = call <3 x float> @func_v3f32()
   br label %bb1
@@ -107,15 +107,15 @@ define half @call_split_type_used_outside_block_v4f16() #0 {
 ; GCN-NEXT:    s_add_u32 s16, s16, func_v4f16@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s17, s17, func_v4f16@rel32@hi+12
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; GCN-NEXT:    v_readlane_b32 s4, v40, 0
-; GCN-NEXT:    v_readlane_b32 s5, v40, 1
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 2
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 bb0:
   %split.ret.type = call <4 x half> @func_v4f16()
   br label %bb1
@@ -141,16 +141,16 @@ define { i32, half } @call_split_type_used_outside_block_struct() #0 {
 ; GCN-NEXT:    s_add_u32 s16, s16, func_struct@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s17, s17, func_struct@rel32@hi+12
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[16:17]
-; GCN-NEXT:    v_readlane_b32 s4, v40, 0
 ; GCN-NEXT:    v_mov_b32_e32 v1, v4
-; GCN-NEXT:    v_readlane_b32 s5, v40, 1
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 2
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 bb0:
   %split.ret.type = call { <4 x i32>, <4 x half> } @func_struct()
   br label %bb1

diff --git a/llvm/test/CodeGen/AMDGPU/dagcombine-lshr-and-cmp.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-lshr-and-cmp.ll
index e1fe562c45773..3c22802d90118 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-lshr-and-cmp.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-lshr-and-cmp.ll
@@ -5,11 +5,10 @@ define i32 @divergent_lshr_and_cmp(i32 %x) {
   ; GCN-LABEL: name: divergent_lshr_and_cmp
   ; GCN: bb.0.entry:
   ; GCN-NEXT:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; GCN-NEXT:   liveins: $vgpr0, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0
   ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 2, [[COPY1]], implicit $exec
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 2, [[COPY]], implicit $exec
   ; GCN-NEXT:   [[V_CMP_NE_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_NE_U32_e64 killed [[V_AND_B32_e64_]], 0, implicit $exec
   ; GCN-NEXT:   [[SI_IF:%[0-9]+]]:sreg_64 = SI_IF killed [[V_CMP_NE_U32_e64_]], %bb.2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
   ; GCN-NEXT:   S_BRANCH %bb.1
@@ -18,16 +17,14 @@ define i32 @divergent_lshr_and_cmp(i32 %x) {
   ; GCN-NEXT:   successors: %bb.2(0x80000000)
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 2
-  ; GCN-NEXT:   [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 killed [[S_MOV_B32_]], [[COPY1]], implicit $exec
+  ; GCN-NEXT:   [[V_LSHLREV_B32_e64_:%[0-9]+]]:vgpr_32 = V_LSHLREV_B32_e64 killed [[S_MOV_B32_]], [[COPY]], implicit $exec
   ; GCN-NEXT:   S_BRANCH %bb.2
   ; GCN-NEXT: {{  $}}
   ; GCN-NEXT: bb.2.UnifiedReturnBlock:
-  ; GCN-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI [[COPY1]], %bb.0, [[V_LSHLREV_B32_e64_]], %bb.1
+  ; GCN-NEXT:   [[PHI:%[0-9]+]]:vgpr_32 = PHI [[COPY]], %bb.0, [[V_LSHLREV_B32_e64_]], %bb.1
   ; GCN-NEXT:   SI_END_CF [[SI_IF]], implicit-def dead $exec, implicit-def dead $scc, implicit $exec
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
   ; GCN-NEXT:   $vgpr0 = COPY [[PHI]]
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY3]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %0 = and i32 %x, 2
   %1 = icmp ne i32 %0, 0

diff --git a/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll b/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
index b7b2fe5d02492..cd40dc678dcd0 100644
--- a/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergence-driven-trunc-to-i1.ll
@@ -34,22 +34,19 @@ define amdgpu_kernel void @uniform_trunc_i16_to_i1(i1 addrspace(1)* %out, i16 %x
 define i1 @divergent_trunc_i16_to_i1(i1 addrspace(1)* %out, i16 %x, i1 %z) {
   ; GCN-LABEL: name: divergent_trunc_i16_to_i1
   ; GCN: bb.0 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr2, $vgpr3
   ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY1]], implicit $exec
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
   ; GCN-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
-  ; GCN-NEXT:   [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[COPY2]], 0, 16, implicit $exec
+  ; GCN-NEXT:   [[V_BFE_I32_e64_:%[0-9]+]]:vgpr_32 = V_BFE_I32_e64 [[COPY1]], 0, 16, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
   ; GCN-NEXT:   [[V_CMP_LT_I32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I32_e64 killed [[V_BFE_I32_e64_]], killed [[S_MOV_B32_]], implicit $exec
   ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I32_e64_]], killed [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
   ; GCN-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
   ; GCN-NEXT:   $vgpr0 = COPY [[V_CNDMASK_B32_e64_]]
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
   %setcc = icmp slt i16 %x, 0
   %select = select i1 %setcc, i1 true, i1 %z
   ret i1 %select
@@ -89,21 +86,18 @@ define amdgpu_kernel void @uniform_trunc_i32_to_i1(i1 addrspace(1)* %out, i32 %x
 define i1 @divergent_trunc_i32_to_i1(i1 addrspace(1)* %out, i32 %x, i1 %z) {
   ; GCN-LABEL: name: divergent_trunc_i32_to_i1
   ; GCN: bb.0 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr2, $vgpr3, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr2, $vgpr3
   ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY1]], implicit $exec
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
   ; GCN-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 0
-  ; GCN-NEXT:   [[V_CMP_LT_I32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I32_e64 [[COPY2]], killed [[S_MOV_B32_]], implicit $exec
+  ; GCN-NEXT:   [[V_CMP_LT_I32_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I32_e64 [[COPY1]], killed [[S_MOV_B32_]], implicit $exec
   ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I32_e64_]], killed [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
   ; GCN-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
   ; GCN-NEXT:   $vgpr0 = COPY [[V_CNDMASK_B32_e64_]]
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
   %setcc = icmp slt i32 %x, 0
   %select = select i1 %setcc, i1 true, i1 %z
   ret i1 %select
@@ -147,24 +141,21 @@ define amdgpu_kernel void @uniform_trunc_i64_to_i1(i1 addrspace(1)* %out, i64 %x
 define i1 @divergent_trunc_i64_to_i1(i1 addrspace(1)* %out, i64 %x, i1 %z) {
   ; GCN-LABEL: name: divergent_trunc_i64_to_i1
   ; GCN: bb.0 (%ir-block.0):
-  ; GCN-NEXT:   liveins: $vgpr2, $vgpr3, $vgpr4, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr2, $vgpr3, $vgpr4
   ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr4
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr3
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-  ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY3]], %subreg.sub0, [[COPY2]], %subreg.sub1
-  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY1]], implicit $exec
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr4
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
+  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
+  ; GCN-NEXT:   [[REG_SEQUENCE:%[0-9]+]]:sreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
+  ; GCN-NEXT:   [[V_AND_B32_e64_:%[0-9]+]]:vgpr_32 = V_AND_B32_e64 1, [[COPY]], implicit $exec
   ; GCN-NEXT:   [[V_CMP_EQ_U32_e64_:%[0-9]+]]:sreg_64 = V_CMP_EQ_U32_e64 killed [[V_AND_B32_e64_]], 1, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B64_:%[0-9]+]]:sreg_64 = S_MOV_B64 0
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
-  ; GCN-NEXT:   [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE]], [[COPY4]], implicit $exec
+  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:vreg_64 = COPY killed [[S_MOV_B64_]]
+  ; GCN-NEXT:   [[V_CMP_LT_I64_e64_:%[0-9]+]]:sreg_64 = V_CMP_LT_I64_e64 killed [[REG_SEQUENCE]], [[COPY3]], implicit $exec
   ; GCN-NEXT:   [[S_OR_B64_:%[0-9]+]]:sreg_64_xexec = S_OR_B64 killed [[V_CMP_LT_I64_e64_]], killed [[V_CMP_EQ_U32_e64_]], implicit-def dead $scc
   ; GCN-NEXT:   [[V_CNDMASK_B32_e64_:%[0-9]+]]:vgpr_32 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed [[S_OR_B64_]], implicit $exec
-  ; GCN-NEXT:   [[COPY5:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
   ; GCN-NEXT:   $vgpr0 = COPY [[V_CNDMASK_B32_e64_]]
-  ; GCN-NEXT:   [[COPY6:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY5]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY6]], implicit $vgpr0
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
   %setcc = icmp slt i64 %x, 0
   %select = select i1 %setcc, i1 true, i1 %z
   ret i1 %select

diff --git a/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll b/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll
index 121d5d35bfe7a..cf610f9436acd 100644
--- a/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll
+++ b/llvm/test/CodeGen/AMDGPU/fdiv-nofpexcept.ll
@@ -7,32 +7,29 @@
 define float @fdiv_f32(float %a, float %b) #0 {
   ; GCN-LABEL: name: fdiv_f32
   ; GCN: bb.0.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
   ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; GCN-NEXT:   %6:vgpr_32, %7:sreg_64 = nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY2]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %8:vgpr_32, %9:sreg_64 = nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY1]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %10:vgpr_32 = nofpexcept V_RCP_F32_e64 0, %8, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; GCN-NEXT:   %4:vgpr_32, %5:sreg_64 = nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %6:vgpr_32, %7:sreg_64 = nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %8:vgpr_32 = nofpexcept V_RCP_F32_e64 0, %6, 0, 0, implicit $mode, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 3
   ; GCN-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 1065353216
   ; GCN-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
   ; GCN-NEXT:   S_SETREG_B32_mode killed [[S_MOV_B32_]], 2305, implicit-def $mode, implicit $mode
-  ; GCN-NEXT:   %14:vgpr_32 = nofpexcept V_FMA_F32_e64 1, %8, 0, %10, 0, killed [[S_MOV_B32_1]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %15:vgpr_32 = nofpexcept V_FMA_F32_e64 0, killed %14, 0, %10, 0, %10, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %16:vgpr_32 = nofpexcept V_MUL_F32_e64 0, %6, 0, %15, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %17:vgpr_32 = nofpexcept V_FMA_F32_e64 1, %8, 0, %16, 0, %6, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %18:vgpr_32 = nofpexcept V_FMA_F32_e64 0, killed %17, 0, %15, 0, %16, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %19:vgpr_32 = nofpexcept V_FMA_F32_e64 1, %8, 0, %18, 0, %6, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %12:vgpr_32 = nofpexcept V_FMA_F32_e64 1, %6, 0, %8, 0, killed [[S_MOV_B32_1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %13:vgpr_32 = nofpexcept V_FMA_F32_e64 0, killed %12, 0, %8, 0, %8, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %14:vgpr_32 = nofpexcept V_MUL_F32_e64 0, %4, 0, %13, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %15:vgpr_32 = nofpexcept V_FMA_F32_e64 1, %6, 0, %14, 0, %4, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %16:vgpr_32 = nofpexcept V_FMA_F32_e64 0, killed %15, 0, %13, 0, %14, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %17:vgpr_32 = nofpexcept V_FMA_F32_e64 1, %6, 0, %16, 0, %4, 0, 0, implicit $mode, implicit $exec
   ; GCN-NEXT:   S_SETREG_B32_mode killed [[S_MOV_B32_2]], 2305, implicit-def dead $mode, implicit $mode
-  ; GCN-NEXT:   $vcc = COPY %7
-  ; GCN-NEXT:   %20:vgpr_32 = nofpexcept V_DIV_FMAS_F32_e64 0, killed %19, 0, %15, 0, %18, 0, 0, implicit $mode, implicit $vcc, implicit $exec
-  ; GCN-NEXT:   %21:vgpr_32 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed %20, 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN-NEXT:   $vgpr0 = COPY %21
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; GCN-NEXT:   $vcc = COPY %5
+  ; GCN-NEXT:   %18:vgpr_32 = nofpexcept V_DIV_FMAS_F32_e64 0, killed %17, 0, %13, 0, %16, 0, 0, implicit $mode, implicit $vcc, implicit $exec
+  ; GCN-NEXT:   %19:vgpr_32 = nofpexcept V_DIV_FIXUP_F32_e64 0, killed %18, 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   $vgpr0 = COPY %19
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %fdiv = fdiv float %a, %b
   ret float %fdiv
@@ -41,32 +38,29 @@ entry:
 define float @fdiv_nnan_f32(float %a, float %b) #0 {
   ; GCN-LABEL: name: fdiv_nnan_f32
   ; GCN: bb.0.entry:
-  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
+  ; GCN-NEXT:   liveins: $vgpr0, $vgpr1
   ; GCN-NEXT: {{  $}}
-  ; GCN-NEXT:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
-  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
-  ; GCN-NEXT:   [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-  ; GCN-NEXT:   %6:vgpr_32, %7:sreg_64 = nnan nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY2]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %8:vgpr_32, %9:sreg_64 = nnan nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY1]], 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %10:vgpr_32 = nnan nofpexcept V_RCP_F32_e64 0, %8, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+  ; GCN-NEXT:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+  ; GCN-NEXT:   %4:vgpr_32, %5:sreg_64 = nnan nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %6:vgpr_32, %7:sreg_64 = nnan nofpexcept V_DIV_SCALE_F32_e64 0, [[COPY]], 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %8:vgpr_32 = nnan nofpexcept V_RCP_F32_e64 0, %6, 0, 0, implicit $mode, implicit $exec
   ; GCN-NEXT:   [[S_MOV_B32_:%[0-9]+]]:sreg_32 = S_MOV_B32 3
   ; GCN-NEXT:   [[S_MOV_B32_1:%[0-9]+]]:sgpr_32 = S_MOV_B32 1065353216
   ; GCN-NEXT:   [[S_MOV_B32_2:%[0-9]+]]:sreg_32 = S_MOV_B32 0
   ; GCN-NEXT:   S_SETREG_B32_mode killed [[S_MOV_B32_]], 2305, implicit-def $mode, implicit $mode
-  ; GCN-NEXT:   %14:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 1, %8, 0, %10, 0, killed [[S_MOV_B32_1]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %15:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 0, killed %14, 0, %10, 0, %10, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %16:vgpr_32 = nnan nofpexcept V_MUL_F32_e64 0, %6, 0, %15, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %17:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 1, %8, 0, %16, 0, %6, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %18:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 0, killed %17, 0, %15, 0, %16, 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   %19:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 1, %8, 0, %18, 0, %6, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %12:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 1, %6, 0, %8, 0, killed [[S_MOV_B32_1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %13:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 0, killed %12, 0, %8, 0, %8, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %14:vgpr_32 = nnan nofpexcept V_MUL_F32_e64 0, %4, 0, %13, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %15:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 1, %6, 0, %14, 0, %4, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %16:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 0, killed %15, 0, %13, 0, %14, 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   %17:vgpr_32 = nnan nofpexcept V_FMA_F32_e64 1, %6, 0, %16, 0, %4, 0, 0, implicit $mode, implicit $exec
   ; GCN-NEXT:   S_SETREG_B32_mode killed [[S_MOV_B32_2]], 2305, implicit-def dead $mode, implicit $mode
-  ; GCN-NEXT:   $vcc = COPY %7
-  ; GCN-NEXT:   %20:vgpr_32 = nnan nofpexcept V_DIV_FMAS_F32_e64 0, killed %19, 0, %15, 0, %18, 0, 0, implicit $mode, implicit $vcc, implicit $exec
-  ; GCN-NEXT:   %21:vgpr_32 = nnan nofpexcept V_DIV_FIXUP_F32_e64 0, killed %20, 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $mode, implicit $exec
-  ; GCN-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
-  ; GCN-NEXT:   $vgpr0 = COPY %21
-  ; GCN-NEXT:   [[COPY4:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY3]]
-  ; GCN-NEXT:   S_SETPC_B64_return [[COPY4]], implicit $vgpr0
+  ; GCN-NEXT:   $vcc = COPY %5
+  ; GCN-NEXT:   %18:vgpr_32 = nnan nofpexcept V_DIV_FMAS_F32_e64 0, killed %17, 0, %13, 0, %16, 0, 0, implicit $mode, implicit $vcc, implicit $exec
+  ; GCN-NEXT:   %19:vgpr_32 = nnan nofpexcept V_DIV_FIXUP_F32_e64 0, killed %18, 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
+  ; GCN-NEXT:   $vgpr0 = COPY %19
+  ; GCN-NEXT:   SI_RETURN implicit $vgpr0
 entry:
   %fdiv = fdiv nnan float %a, %b
   ret float %fdiv

diff --git a/llvm/test/CodeGen/AMDGPU/fix-frame-ptr-reg-copy-livein.ll b/llvm/test/CodeGen/AMDGPU/fix-frame-ptr-reg-copy-livein.ll
index 408e00f1f37d9..47287f67b01ef 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-frame-ptr-reg-copy-livein.ll
+++ b/llvm/test/CodeGen/AMDGPU/fix-frame-ptr-reg-copy-livein.ll
@@ -10,17 +10,17 @@
 define i32 @fp_save_restore_in_temp_sgpr(%struct.Data addrspace(5)* nocapture readonly byval(%struct.Data) align 4 %arg) #0 {
   ; GCN-LABEL: name: fp_save_restore_in_temp_sgpr
   ; GCN: bb.0.begin:
-  ; GCN:   liveins: $sgpr11, $sgpr30_sgpr31
+  ; GCN:   liveins: $sgpr11
   ; GCN:   $sgpr11 = frame-setup COPY $sgpr33
   ; GCN:   $sgpr33 = frame-setup COPY $sgpr32
   ; GCN: bb.1.lp_end:
-  ; GCN:   liveins: $sgpr10, $sgpr11, $vgpr1, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr30_sgpr31
+  ; GCN:   liveins: $sgpr10, $sgpr11, $vgpr1, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9
   ; GCN: bb.2.lp_begin:
-  ; GCN:   liveins: $sgpr10, $sgpr11, $vgpr1, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr30_sgpr31
+  ; GCN:   liveins: $sgpr10, $sgpr11, $vgpr1, $sgpr4_sgpr5, $sgpr6_sgpr7
   ; GCN: bb.3.Flow:
-  ; GCN:   liveins: $sgpr10, $sgpr11, $vgpr0, $vgpr1, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr30_sgpr31
+  ; GCN:   liveins: $sgpr10, $sgpr11, $vgpr0, $vgpr1, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9
   ; GCN: bb.4.end:
-  ; GCN:   liveins: $sgpr11, $vgpr0, $sgpr4_sgpr5, $sgpr30_sgpr31
+  ; GCN:   liveins: $sgpr11, $vgpr0, $sgpr4_sgpr5
   ; GCN:   $sgpr33 = frame-destroy COPY $sgpr11
 begin:
   br label %lp_begin

diff --git a/llvm/test/CodeGen/AMDGPU/fpow.ll b/llvm/test/CodeGen/AMDGPU/fpow.ll
index ea461cab629df..bf783cba5ace7 100644
--- a/llvm/test/CodeGen/AMDGPU/fpow.ll
+++ b/llvm/test/CodeGen/AMDGPU/fpow.ll
@@ -202,9 +202,9 @@ define <2 x half> @v_pow_v2f16(<2 x half> %x, <2 x half> %y) {
 ; GFX8-NEXT:    v_log_f32_e32 v0, v0
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX8-NEXT:    v_exp_f32_e32 v2, v2
+; GFX8-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX8-NEXT:    v_exp_f32_e32 v0, v0
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -220,9 +220,9 @@ define <2 x half> @v_pow_v2f16(<2 x half> %x, <2 x half> %y) {
 ; GFX9-NEXT:    v_log_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX9-NEXT:    v_exp_f32_e32 v2, v2
+; GFX9-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX9-NEXT:    v_exp_f32_e32 v0, v0
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
@@ -238,9 +238,9 @@ define <2 x half> @v_pow_v2f16(<2 x half> %x, <2 x half> %y) {
 ; GFX90A-NEXT:    v_log_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_mul_legacy_f32 v2, v3, v2
 ; GFX90A-NEXT:    v_mul_legacy_f32 v0, v1, v0
-; GFX90A-NEXT:    v_exp_f32_e32 v2, v2
+; GFX90A-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX90A-NEXT:    v_exp_f32_e32 v0, v0
-; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX90A-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX90A-NEXT:    s_setpc_b64 s[30:31]
@@ -302,9 +302,9 @@ define <2 x half> @v_pow_v2f16_fneg_lhs(<2 x half> %x, <2 x half> %y) {
 ; GFX8-NEXT:    v_log_f32_e32 v0, v0
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX8-NEXT:    v_exp_f32_e32 v2, v2
+; GFX8-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX8-NEXT:    v_exp_f32_e32 v0, v0
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -320,9 +320,9 @@ define <2 x half> @v_pow_v2f16_fneg_lhs(<2 x half> %x, <2 x half> %y) {
 ; GFX9-NEXT:    v_log_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX9-NEXT:    v_exp_f32_e32 v2, v2
+; GFX9-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX9-NEXT:    v_exp_f32_e32 v0, v0
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
@@ -338,9 +338,9 @@ define <2 x half> @v_pow_v2f16_fneg_lhs(<2 x half> %x, <2 x half> %y) {
 ; GFX90A-NEXT:    v_log_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_mul_legacy_f32 v2, v3, v2
 ; GFX90A-NEXT:    v_mul_legacy_f32 v0, v1, v0
-; GFX90A-NEXT:    v_exp_f32_e32 v2, v2
+; GFX90A-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX90A-NEXT:    v_exp_f32_e32 v0, v0
-; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX90A-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX90A-NEXT:    s_setpc_b64 s[30:31]
@@ -403,9 +403,9 @@ define <2 x half> @v_pow_v2f16_fneg_rhs(<2 x half> %x, <2 x half> %y) {
 ; GFX8-NEXT:    v_log_f32_e32 v0, v0
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX8-NEXT:    v_exp_f32_e32 v2, v2
+; GFX8-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX8-NEXT:    v_exp_f32_e32 v0, v0
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -421,9 +421,9 @@ define <2 x half> @v_pow_v2f16_fneg_rhs(<2 x half> %x, <2 x half> %y) {
 ; GFX9-NEXT:    v_log_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX9-NEXT:    v_exp_f32_e32 v2, v2
+; GFX9-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX9-NEXT:    v_exp_f32_e32 v0, v0
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
@@ -439,9 +439,9 @@ define <2 x half> @v_pow_v2f16_fneg_rhs(<2 x half> %x, <2 x half> %y) {
 ; GFX90A-NEXT:    v_log_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_mul_legacy_f32 v2, v3, v2
 ; GFX90A-NEXT:    v_mul_legacy_f32 v0, v1, v0
-; GFX90A-NEXT:    v_exp_f32_e32 v2, v2
+; GFX90A-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX90A-NEXT:    v_exp_f32_e32 v0, v0
-; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX90A-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX90A-NEXT:    s_setpc_b64 s[30:31]
@@ -509,9 +509,9 @@ define <2 x half> @v_pow_v2f16_fneg_lhs_rhs(<2 x half> %x, <2 x half> %y) {
 ; GFX8-NEXT:    v_log_f32_e32 v0, v0
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX8-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX8-NEXT:    v_exp_f32_e32 v2, v2
+; GFX8-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX8-NEXT:    v_exp_f32_e32 v0, v0
-; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; GFX8-NEXT:    v_cvt_f16_f32_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
 ; GFX8-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX8-NEXT:    v_or_b32_e32 v0, v0, v1
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -527,9 +527,9 @@ define <2 x half> @v_pow_v2f16_fneg_lhs_rhs(<2 x half> %x, <2 x half> %y) {
 ; GFX9-NEXT:    v_log_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v2, v3, v2
 ; GFX9-NEXT:    v_mul_legacy_f32_e32 v0, v1, v0
-; GFX9-NEXT:    v_exp_f32_e32 v2, v2
+; GFX9-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX9-NEXT:    v_exp_f32_e32 v0, v0
-; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX9-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX9-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
@@ -545,9 +545,9 @@ define <2 x half> @v_pow_v2f16_fneg_lhs_rhs(<2 x half> %x, <2 x half> %y) {
 ; GFX90A-NEXT:    v_log_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_mul_legacy_f32 v2, v3, v2
 ; GFX90A-NEXT:    v_mul_legacy_f32 v0, v1, v0
-; GFX90A-NEXT:    v_exp_f32_e32 v2, v2
+; GFX90A-NEXT:    v_exp_f32_e32 v1, v2
 ; GFX90A-NEXT:    v_exp_f32_e32 v0, v0
-; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v2
+; GFX90A-NEXT:    v_cvt_f16_f32_e32 v1, v1
 ; GFX90A-NEXT:    v_cvt_f16_f32_e32 v0, v0
 ; GFX90A-NEXT:    v_pack_b32_f16 v0, v0, v1
 ; GFX90A-NEXT:    s_setpc_b64 s[30:31]

diff --git a/llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll
index e7121cec90c8a..450cae8a4a388 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-setup-without-sgpr-to-vgpr-spills.ll
@@ -25,15 +25,15 @@ define void @callee_with_stack_and_call() #0 {
 ; SPILL-TO-VGPR-NEXT:    s_add_u32 s4, s4, external_void_func_void@rel32@lo+4
 ; SPILL-TO-VGPR-NEXT:    s_addc_u32 s5, s5, external_void_func_void@rel32@hi+12
 ; SPILL-TO-VGPR-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; SPILL-TO-VGPR-NEXT:    v_readlane_b32 s4, v40, 0
-; SPILL-TO-VGPR-NEXT:    v_readlane_b32 s5, v40, 1
+; SPILL-TO-VGPR-NEXT:    v_readlane_b32 s31, v40, 1
+; SPILL-TO-VGPR-NEXT:    v_readlane_b32 s30, v40, 0
 ; SPILL-TO-VGPR-NEXT:    s_addk_i32 s32, 0xfc00
 ; SPILL-TO-VGPR-NEXT:    v_readlane_b32 s33, v40, 2
-; SPILL-TO-VGPR-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; SPILL-TO-VGPR-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; SPILL-TO-VGPR-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, s[6:7]
+; SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, s[4:5]
 ; SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
-; SPILL-TO-VGPR-NEXT:    s_setpc_b64 s[4:5]
+; SPILL-TO-VGPR-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; NO-SPILL-TO-VGPR-LABEL: callee_with_stack_and_call:
 ; NO-SPILL-TO-VGPR:       ; %bb.0:
@@ -43,14 +43,21 @@ define void @callee_with_stack_and_call() #0 {
 ; NO-SPILL-TO-VGPR-NEXT:    s_mov_b32 s33, s32
 ; NO-SPILL-TO-VGPR-NEXT:    s_addk_i32 s32, 0x800
 ; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 s[8:9], exec
-; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, 3
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, 1
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:16
 ; NO-SPILL-TO-VGPR-NEXT:    v_writelane_b32 v1, s30, 0
-; NO-SPILL-TO-VGPR-NEXT:    v_writelane_b32 v1, s31, 1
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:16
 ; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
 ; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, s[8:9]
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 s[8:9], exec
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, 1
+; NO-SPILL-TO-VGPR-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:16
+; NO-SPILL-TO-VGPR-NEXT:    v_writelane_b32 v1, s31, 0
+; NO-SPILL-TO-VGPR-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
+; NO-SPILL-TO-VGPR-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:16
+; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, s[8:9]
 ; NO-SPILL-TO-VGPR-NEXT:    v_mov_b32_e32 v0, 0
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_store_dword v0, off, s[0:3], s33
 ; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
@@ -58,21 +65,29 @@ define void @callee_with_stack_and_call() #0 {
 ; NO-SPILL-TO-VGPR-NEXT:    s_add_u32 s4, s4, external_void_func_void@rel32@lo+4
 ; NO-SPILL-TO-VGPR-NEXT:    s_addc_u32 s5, s5, external_void_func_void@rel32@hi+12
 ; NO-SPILL-TO-VGPR-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 s[6:7], exec
-; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, 3
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 s[4:5], exec
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, 1
+; NO-SPILL-TO-VGPR-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:16
+; NO-SPILL-TO-VGPR-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
+; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
+; NO-SPILL-TO-VGPR-NEXT:    v_readlane_b32 s31, v1, 0
+; NO-SPILL-TO-VGPR-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:16
+; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, s[4:5]
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 s[4:5], exec
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, 1
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:16
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
 ; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
-; NO-SPILL-TO-VGPR-NEXT:    v_readlane_b32 s4, v1, 0
-; NO-SPILL-TO-VGPR-NEXT:    v_readlane_b32 s5, v1, 1
+; NO-SPILL-TO-VGPR-NEXT:    v_readlane_b32 s30, v1, 0
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:16
 ; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
-; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, s[6:7]
+; NO-SPILL-TO-VGPR-NEXT:    s_mov_b64 exec, s[4:5]
 ; NO-SPILL-TO-VGPR-NEXT:    s_addk_i32 s32, 0xf800
 ; NO-SPILL-TO-VGPR-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
 ; NO-SPILL-TO-VGPR-NEXT:    s_waitcnt vmcnt(0)
 ; NO-SPILL-TO-VGPR-NEXT:    v_readfirstlane_b32 s33, v0
-; NO-SPILL-TO-VGPR-NEXT:    s_setpc_b64 s[4:5]
+; NO-SPILL-TO-VGPR-NEXT:    s_setpc_b64 s[30:31]
   %alloca = alloca i32, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %alloca
   call void @external_void_func_void()

diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
index 1c041d0c7d700..3eeec671fbc4b 100644
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -933,24 +933,24 @@ define <4 x i16> @v_fshr_v4i16(<4 x i16> %src0, <4 x i16> %src1, <4 x i16> %src2
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v7, 16, v5
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 16, v3
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v11, 16, v4
-; GFX10-NEXT:    v_lshlrev_b16 v1, 1, v1
+; GFX10-NEXT:    v_lshrrev_b32_e32 v12, 16, v0
 ; GFX10-NEXT:    v_lshlrev_b16 v6, 1, v6
 ; GFX10-NEXT:    v_xor_b32_e32 v9, -1, v7
 ; GFX10-NEXT:    v_lshrrev_b16 v7, v7, v8
-; GFX10-NEXT:    v_lshrrev_b32_e32 v8, 16, v0
 ; GFX10-NEXT:    v_lshlrev_b16 v0, 1, v0
-; GFX10-NEXT:    v_xor_b32_e32 v12, -1, v5
+; GFX10-NEXT:    v_xor_b32_e32 v8, -1, v4
+; GFX10-NEXT:    v_lshlrev_b16 v1, 1, v1
 ; GFX10-NEXT:    v_lshlrev_b16 v6, v9, v6
-; GFX10-NEXT:    v_xor_b32_e32 v9, -1, v4
+; GFX10-NEXT:    v_xor_b32_e32 v9, -1, v5
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
-; GFX10-NEXT:    v_lshlrev_b16 v8, 1, v8
+; GFX10-NEXT:    v_lshlrev_b16 v12, 1, v12
 ; GFX10-NEXT:    v_xor_b32_e32 v13, -1, v11
+; GFX10-NEXT:    v_lshlrev_b16 v0, v8, v0
 ; GFX10-NEXT:    v_lshrrev_b16 v2, v4, v2
-; GFX10-NEXT:    v_lshlrev_b16 v0, v9, v0
-; GFX10-NEXT:    v_lshlrev_b16 v1, v12, v1
+; GFX10-NEXT:    v_lshlrev_b16 v1, v9, v1
 ; GFX10-NEXT:    v_lshrrev_b16 v3, v5, v3
 ; GFX10-NEXT:    v_lshrrev_b16 v4, v11, v10
-; GFX10-NEXT:    v_lshlrev_b16 v5, v13, v8
+; GFX10-NEXT:    v_lshlrev_b16 v5, v13, v12
 ; GFX10-NEXT:    v_or_b32_e32 v0, v0, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0xffff
 ; GFX10-NEXT:    v_or_b32_e32 v1, v1, v3

diff --git a/llvm/test/CodeGen/AMDGPU/gfx-call-non-gfx-func.ll b/llvm/test/CodeGen/AMDGPU/gfx-call-non-gfx-func.ll
index 301fff21eef01..8301b369c5f49 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-call-non-gfx-func.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-call-non-gfx-func.ll
@@ -11,7 +11,7 @@ define amdgpu_gfx void @gfx_func() {
 ; SDAG-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; SDAG-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; SDAG-NEXT:    s_mov_b64 exec, s[34:35]
-; SDAG-NEXT:    v_writelane_b32 v40, s33, 26
+; SDAG-NEXT:    v_writelane_b32 v40, s33, 28
 ; SDAG-NEXT:    v_writelane_b32 v40, s4, 0
 ; SDAG-NEXT:    v_writelane_b32 v40, s5, 1
 ; SDAG-NEXT:    v_writelane_b32 v40, s6, 2
@@ -30,24 +30,27 @@ define amdgpu_gfx void @gfx_func() {
 ; SDAG-NEXT:    v_writelane_b32 v40, s19, 15
 ; SDAG-NEXT:    v_writelane_b32 v40, s20, 16
 ; SDAG-NEXT:    v_writelane_b32 v40, s21, 17
-; SDAG-NEXT:    s_mov_b32 s33, s32
-; SDAG-NEXT:    s_addk_i32 s32, 0x400
 ; SDAG-NEXT:    v_writelane_b32 v40, s22, 18
 ; SDAG-NEXT:    v_writelane_b32 v40, s23, 19
-; SDAG-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; SDAG-NEXT:    s_getpc_b64 s[30:31]
-; SDAG-NEXT:    s_add_u32 s30, s30, extern_c_func@gotpcrel32@lo+4
-; SDAG-NEXT:    s_addc_u32 s31, s31, extern_c_func@gotpcrel32@hi+12
+; SDAG-NEXT:    s_mov_b32 s33, s32
+; SDAG-NEXT:    s_addk_i32 s32, 0x400
 ; SDAG-NEXT:    v_writelane_b32 v40, s24, 20
-; SDAG-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; SDAG-NEXT:    v_writelane_b32 v40, s25, 21
+; SDAG-NEXT:    s_getpc_b64 s[34:35]
+; SDAG-NEXT:    s_add_u32 s34, s34, extern_c_func@gotpcrel32@lo+4
+; SDAG-NEXT:    s_addc_u32 s35, s35, extern_c_func@gotpcrel32@hi+12
 ; SDAG-NEXT:    v_writelane_b32 v40, s26, 22
+; SDAG-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; SDAG-NEXT:    v_writelane_b32 v40, s27, 23
 ; SDAG-NEXT:    v_writelane_b32 v40, s28, 24
-; SDAG-NEXT:    s_mov_b64 s[8:9], 0
 ; SDAG-NEXT:    v_writelane_b32 v40, s29, 25
+; SDAG-NEXT:    v_writelane_b32 v40, s30, 26
+; SDAG-NEXT:    s_mov_b64 s[8:9], 0
+; SDAG-NEXT:    v_writelane_b32 v40, s31, 27
 ; SDAG-NEXT:    s_waitcnt lgkmcnt(0)
-; SDAG-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; SDAG-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; SDAG-NEXT:    v_readlane_b32 s31, v40, 27
+; SDAG-NEXT:    v_readlane_b32 s30, v40, 26
 ; SDAG-NEXT:    v_readlane_b32 s29, v40, 25
 ; SDAG-NEXT:    v_readlane_b32 s28, v40, 24
 ; SDAG-NEXT:    v_readlane_b32 s27, v40, 23
@@ -75,12 +78,12 @@ define amdgpu_gfx void @gfx_func() {
 ; SDAG-NEXT:    v_readlane_b32 s5, v40, 1
 ; SDAG-NEXT:    v_readlane_b32 s4, v40, 0
 ; SDAG-NEXT:    s_addk_i32 s32, 0xfc00
-; SDAG-NEXT:    v_readlane_b32 s33, v40, 26
-; SDAG-NEXT:    s_or_saveexec_b64 s[30:31], -1
+; SDAG-NEXT:    v_readlane_b32 s33, v40, 28
+; SDAG-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; SDAG-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; SDAG-NEXT:    s_mov_b64 exec, s[30:31]
+; SDAG-NEXT:    s_mov_b64 exec, s[34:35]
 ; SDAG-NEXT:    s_waitcnt vmcnt(0)
-; SDAG-NEXT:    s_setpc_b64 s[36:37]
+; SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: gfx_func:
 ; GISEL:       ; %bb.0:
@@ -88,7 +91,7 @@ define amdgpu_gfx void @gfx_func() {
 ; GISEL-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GISEL-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GISEL-NEXT:    s_mov_b64 exec, s[34:35]
-; GISEL-NEXT:    v_writelane_b32 v40, s33, 26
+; GISEL-NEXT:    v_writelane_b32 v40, s33, 28
 ; GISEL-NEXT:    v_writelane_b32 v40, s4, 0
 ; GISEL-NEXT:    v_writelane_b32 v40, s5, 1
 ; GISEL-NEXT:    v_writelane_b32 v40, s6, 2
@@ -107,24 +110,27 @@ define amdgpu_gfx void @gfx_func() {
 ; GISEL-NEXT:    v_writelane_b32 v40, s19, 15
 ; GISEL-NEXT:    v_writelane_b32 v40, s20, 16
 ; GISEL-NEXT:    v_writelane_b32 v40, s21, 17
-; GISEL-NEXT:    s_mov_b32 s33, s32
-; GISEL-NEXT:    s_addk_i32 s32, 0x400
 ; GISEL-NEXT:    v_writelane_b32 v40, s22, 18
 ; GISEL-NEXT:    v_writelane_b32 v40, s23, 19
-; GISEL-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GISEL-NEXT:    s_getpc_b64 s[30:31]
-; GISEL-NEXT:    s_add_u32 s30, s30, extern_c_func@gotpcrel32@lo+4
-; GISEL-NEXT:    s_addc_u32 s31, s31, extern_c_func@gotpcrel32@hi+12
+; GISEL-NEXT:    s_mov_b32 s33, s32
+; GISEL-NEXT:    s_addk_i32 s32, 0x400
 ; GISEL-NEXT:    v_writelane_b32 v40, s24, 20
-; GISEL-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GISEL-NEXT:    v_writelane_b32 v40, s25, 21
+; GISEL-NEXT:    s_getpc_b64 s[34:35]
+; GISEL-NEXT:    s_add_u32 s34, s34, extern_c_func@gotpcrel32@lo+4
+; GISEL-NEXT:    s_addc_u32 s35, s35, extern_c_func@gotpcrel32@hi+12
 ; GISEL-NEXT:    v_writelane_b32 v40, s26, 22
+; GISEL-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GISEL-NEXT:    v_writelane_b32 v40, s27, 23
 ; GISEL-NEXT:    v_writelane_b32 v40, s28, 24
-; GISEL-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GISEL-NEXT:    v_writelane_b32 v40, s29, 25
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 26
+; GISEL-NEXT:    s_mov_b64 s[8:9], s[4:5]
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 27
 ; GISEL-NEXT:    s_waitcnt lgkmcnt(0)
-; GISEL-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GISEL-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 27
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 26
 ; GISEL-NEXT:    v_readlane_b32 s29, v40, 25
 ; GISEL-NEXT:    v_readlane_b32 s28, v40, 24
 ; GISEL-NEXT:    v_readlane_b32 s27, v40, 23
@@ -152,12 +158,12 @@ define amdgpu_gfx void @gfx_func() {
 ; GISEL-NEXT:    v_readlane_b32 s5, v40, 1
 ; GISEL-NEXT:    v_readlane_b32 s4, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
-; GISEL-NEXT:    v_readlane_b32 s33, v40, 26
-; GISEL-NEXT:    s_or_saveexec_b64 s[30:31], -1
+; GISEL-NEXT:    v_readlane_b32 s33, v40, 28
+; GISEL-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GISEL-NEXT:    s_mov_b64 exec, s[30:31]
+; GISEL-NEXT:    s_mov_b64 exec, s[34:35]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[36:37]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   call void @extern_c_func()
   ret void
 }

diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
index df6e07002e8a2..ddea29ebc3985 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
@@ -105,13 +105,13 @@ define amdgpu_gfx void @test_call_external_void_func_i1_imm() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i1@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i1@rel32@hi+12
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i1@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i1@rel32@hi+12
 ; GFX9-NEXT:    buffer_store_byte v0, off, s[0:3], s32
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -132,15 +132,15 @@ define amdgpu_gfx void @test_call_external_void_func_i1_imm() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 1
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i1@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i1@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    buffer_store_byte v0, off, s[0:3], s32
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i1@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i1@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -169,8 +169,8 @@ define amdgpu_gfx void @test_call_external_void_func_i1_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    scratch_store_byte off, v0, s32
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -197,14 +197,14 @@ define amdgpu_gfx void @test_call_external_void_func_i1_signext(i32) #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i1_signext@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i1_signext@rel32@hi+12
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i1_signext@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i1_signext@rel32@hi+12
 ; GFX9-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX9-NEXT:    buffer_store_byte v0, off, s[0:3], s32
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -226,16 +226,16 @@ define amdgpu_gfx void @test_call_external_void_func_i1_signext(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i1_signext@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i1_signext@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i1_signext@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i1_signext@rel32@hi+12
 ; GFX10-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX10-NEXT:    buffer_store_byte v0, off, s[0:3], s32
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -266,8 +266,8 @@ define amdgpu_gfx void @test_call_external_void_func_i1_signext(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX10-SCRATCH-NEXT:    scratch_store_byte off, v0, s32
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -295,14 +295,14 @@ define amdgpu_gfx void @test_call_external_void_func_i1_zeroext(i32) #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i1_zeroext@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i1_zeroext@rel32@hi+12
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i1_zeroext@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i1_zeroext@rel32@hi+12
 ; GFX9-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX9-NEXT:    buffer_store_byte v0, off, s[0:3], s32
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -324,16 +324,16 @@ define amdgpu_gfx void @test_call_external_void_func_i1_zeroext(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i1_zeroext@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i1_zeroext@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i1_zeroext@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i1_zeroext@rel32@hi+12
 ; GFX10-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX10-NEXT:    buffer_store_byte v0, off, s[0:3], s32
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -364,8 +364,8 @@ define amdgpu_gfx void @test_call_external_void_func_i1_zeroext(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_and_b32_e32 v0, 1, v0
 ; GFX10-SCRATCH-NEXT:    scratch_store_byte off, v0, s32
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -392,12 +392,12 @@ define amdgpu_gfx void @test_call_external_void_func_i8_imm(i32) #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x7b
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i8@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i8@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i8@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i8@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -418,14 +418,14 @@ define amdgpu_gfx void @test_call_external_void_func_i8_imm(i32) #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0x7b
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i8@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i8@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i8@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i8@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -453,8 +453,8 @@ define amdgpu_gfx void @test_call_external_void_func_i8_imm(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -481,12 +481,12 @@ define amdgpu_gfx void @test_call_external_void_func_i8_signext(i32) #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i8_signext@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i8_signext@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i8_signext@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i8_signext@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -508,14 +508,14 @@ define amdgpu_gfx void @test_call_external_void_func_i8_signext(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i8_signext@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i8_signext@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i8_signext@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i8_signext@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -544,8 +544,8 @@ define amdgpu_gfx void @test_call_external_void_func_i8_signext(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -573,12 +573,12 @@ define amdgpu_gfx void @test_call_external_void_func_i8_zeroext(i32) #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i8_zeroext@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i8_zeroext@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i8_zeroext@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i8_zeroext@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -600,14 +600,14 @@ define amdgpu_gfx void @test_call_external_void_func_i8_zeroext(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i8_zeroext@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i8_zeroext@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i8_zeroext@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i8_zeroext@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -636,8 +636,8 @@ define amdgpu_gfx void @test_call_external_void_func_i8_zeroext(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -664,12 +664,12 @@ define amdgpu_gfx void @test_call_external_void_func_i16_imm() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x7b
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -690,14 +690,14 @@ define amdgpu_gfx void @test_call_external_void_func_i16_imm() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0x7b
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -725,8 +725,8 @@ define amdgpu_gfx void @test_call_external_void_func_i16_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -753,12 +753,12 @@ define amdgpu_gfx void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i16_signext@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i16_signext@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i16_signext@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i16_signext@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -780,14 +780,14 @@ define amdgpu_gfx void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i16_signext@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i16_signext@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i16_signext@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i16_signext@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -816,8 +816,8 @@ define amdgpu_gfx void @test_call_external_void_func_i16_signext(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -845,12 +845,12 @@ define amdgpu_gfx void @test_call_external_void_func_i16_zeroext(i32) #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i16_zeroext@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i16_zeroext@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i16_zeroext@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i16_zeroext@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -872,14 +872,14 @@ define amdgpu_gfx void @test_call_external_void_func_i16_zeroext(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i16_zeroext@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i16_zeroext@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i16_zeroext@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i16_zeroext@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -908,8 +908,8 @@ define amdgpu_gfx void @test_call_external_void_func_i16_zeroext(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -936,12 +936,12 @@ define amdgpu_gfx void @test_call_external_void_func_i32_imm(i32) #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 42
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -962,14 +962,14 @@ define amdgpu_gfx void @test_call_external_void_func_i32_imm(i32) #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 42
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -997,8 +997,8 @@ define amdgpu_gfx void @test_call_external_void_func_i32_imm(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1025,12 +1025,12 @@ define amdgpu_gfx void @test_call_external_void_func_i64_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x7b
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1053,13 +1053,13 @@ define amdgpu_gfx void @test_call_external_void_func_i64_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i64@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1088,8 +1088,8 @@ define amdgpu_gfx void @test_call_external_void_func_i64_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_i64@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1117,12 +1117,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1144,15 +1144,15 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64@rel32@hi+12
 ; GFX10-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1182,8 +1182,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1213,12 +1213,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 3
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1243,13 +1243,13 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1280,8 +1280,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v2i64@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1311,12 +1311,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i64() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v4, 1
 ; GFX9-NEXT:    v_mov_b32_e32 v5, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1342,13 +1342,13 @@ define amdgpu_gfx void @test_call_external_void_func_v3i64() #0 {
 ; GFX10-NEXT:    global_load_dwordx4 v[0:3], v[0:1], off
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i64@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1380,8 +1380,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i64() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v3i64@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1416,12 +1416,12 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v6, 3
 ; GFX9-NEXT:    v_mov_b32_e32 v7, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1449,13 +1449,13 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v7, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i64@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1489,8 +1489,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v4i64@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1518,12 +1518,12 @@ define amdgpu_gfx void @test_call_external_void_func_f16_imm() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x4400
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_f16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_f16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_f16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_f16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1544,14 +1544,14 @@ define amdgpu_gfx void @test_call_external_void_func_f16_imm() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0x4400
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_f16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_f16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_f16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_f16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1579,8 +1579,8 @@ define amdgpu_gfx void @test_call_external_void_func_f16_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1606,12 +1606,12 @@ define amdgpu_gfx void @test_call_external_void_func_f32_imm() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 4.0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_f32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_f32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_f32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_f32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1632,14 +1632,14 @@ define amdgpu_gfx void @test_call_external_void_func_f32_imm() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 4.0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_f32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_f32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_f32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_f32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1667,8 +1667,8 @@ define amdgpu_gfx void @test_call_external_void_func_f32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1695,12 +1695,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2f32_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 1.0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 2.0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2f32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2f32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1723,13 +1723,13 @@ define amdgpu_gfx void @test_call_external_void_func_v2f32_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2f32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2f32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1758,8 +1758,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2f32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v2f32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1787,12 +1787,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 2.0
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 4.0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1816,13 +1816,13 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1852,8 +1852,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v3f32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1883,12 +1883,12 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v3, -1.0
 ; GFX9-NEXT:    v_mov_b32_e32 v4, 0.5
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v5f32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v5f32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v5f32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v5f32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -1914,13 +1914,13 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v4, 0.5
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v5f32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v5f32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v5f32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v5f32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -1952,8 +1952,8 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v5f32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -1980,12 +1980,12 @@ define amdgpu_gfx void @test_call_external_void_func_f64_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0x40100000
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_f64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_f64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_f64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_f64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2008,13 +2008,13 @@ define amdgpu_gfx void @test_call_external_void_func_f64_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_f64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_f64@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_f64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_f64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2043,8 +2043,8 @@ define amdgpu_gfx void @test_call_external_void_func_f64_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_f64@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2073,12 +2073,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 0x40100000
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2f64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2f64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2103,13 +2103,13 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2f64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f64@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2f64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2140,8 +2140,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v2f64@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2172,12 +2172,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v5, 0x40200000
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f64@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f64@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f64@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f64@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2205,12 +2205,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f64@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f64@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f64@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f64@rel32@hi+12
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2243,8 +2243,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_add_u32 s0, s0, external_void_func_v3f64@rel32@lo+4
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v3f64@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2270,12 +2270,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2i16() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2296,14 +2296,14 @@ define amdgpu_gfx void @test_call_external_void_func_v2i16() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2331,8 +2331,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i16() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2359,12 +2359,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2385,14 +2385,14 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2420,8 +2420,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2448,12 +2448,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2474,14 +2474,14 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2509,8 +2509,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2538,12 +2538,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x20001
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 3
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2566,13 +2566,13 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2601,8 +2601,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v3i16@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2629,12 +2629,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x40003c00
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0x4400
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2657,13 +2657,13 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2692,8 +2692,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v3f16@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2719,12 +2719,12 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2745,14 +2745,14 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2780,8 +2780,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2809,12 +2809,12 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x20001
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0x40003
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2837,13 +2837,13 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2872,8 +2872,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v4i16@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2899,12 +2899,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2f16() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2f16@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f16@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2f16@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f16@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -2925,14 +2925,14 @@ define amdgpu_gfx void @test_call_external_void_func_v2f16() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2f16@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f16@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2f16@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f16@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -2960,8 +2960,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2f16() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -2988,12 +2988,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3014,14 +3014,14 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3049,8 +3049,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3078,12 +3078,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3106,13 +3106,13 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3141,8 +3141,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v2i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3170,12 +3170,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm(i32) #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 4
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 5
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3199,13 +3199,13 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm(i32) #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3235,8 +3235,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v3i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3265,12 +3265,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32(i32) #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 5
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 6
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32_i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32_i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32_i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32_i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3295,13 +3295,13 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32_i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32_i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32_i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32_i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3332,8 +3332,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v3i32_i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3359,12 +3359,12 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3385,14 +3385,14 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3420,8 +3420,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3451,12 +3451,12 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 3
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3481,13 +3481,13 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3518,8 +3518,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v4i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3549,12 +3549,12 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 4
 ; GFX9-NEXT:    v_mov_b32_e32 v4, 5
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v5i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v5i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v5i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v5i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3580,13 +3580,13 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v4, 5
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v5i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v5i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v5i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v5i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3618,8 +3618,8 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v5i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3639,22 +3639,22 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32() #0 {
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_mov_b32_e32 v8, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dwordx4 v[0:3], v8, s[30:31]
-; GFX9-NEXT:    global_load_dwordx4 v[4:7], v8, s[30:31] offset:16
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v8, s[34:35]
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v8, s[34:35] offset:16
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3671,24 +3671,23 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32() #0 {
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    v_mov_b32_e32 v8, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v8, s[30:31]
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v8, s[30:31] offset:16
-; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v8, s[34:35]
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v8, s[34:35] offset:16
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32@rel32@hi+12
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3721,8 +3720,8 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v8i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3757,12 +3756,12 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v6, 7
 ; GFX9-NEXT:    v_mov_b32_e32 v7, 8
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3792,12 +3791,12 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32@rel32@hi+12
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3832,8 +3831,8 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm() #0 {
 ; GFX10-SCRATCH-NEXT:    s_add_u32 s0, s0, external_void_func_v8i32@rel32@lo+4
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v8i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3853,24 +3852,24 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32() #0 {
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_mov_b32_e32 v16, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dwordx4 v[0:3], v16, s[30:31]
-; GFX9-NEXT:    global_load_dwordx4 v[4:7], v16, s[30:31] offset:16
-; GFX9-NEXT:    global_load_dwordx4 v[8:11], v16, s[30:31] offset:32
-; GFX9-NEXT:    global_load_dwordx4 v[12:15], v16, s[30:31] offset:48
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v16i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v16i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v16, s[34:35]
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v16, s[34:35] offset:16
+; GFX9-NEXT:    global_load_dwordx4 v[8:11], v16, s[34:35] offset:32
+; GFX9-NEXT:    global_load_dwordx4 v[12:15], v16, s[34:35] offset:48
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v16i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v16i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -3887,26 +3886,25 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32() #0 {
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    v_mov_b32_e32 v16, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x3
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v16, s[30:31]
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v16, s[30:31] offset:16
-; GFX10-NEXT:    global_load_dwordx4 v[8:11], v16, s[30:31] offset:32
-; GFX10-NEXT:    global_load_dwordx4 v[12:15], v16, s[30:31] offset:48
-; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v16i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v16i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v16, s[34:35]
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v16, s[34:35] offset:16
+; GFX10-NEXT:    global_load_dwordx4 v[8:11], v16, s[34:35] offset:32
+; GFX10-NEXT:    global_load_dwordx4 v[12:15], v16, s[34:35] offset:48
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v16i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v16i32@rel32@hi+12
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -3941,8 +3939,8 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v16i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -3964,29 +3962,29 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32() #0 {
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_mov_b32_e32 v28, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dwordx4 v[0:3], v28, s[30:31]
-; GFX9-NEXT:    global_load_dwordx4 v[4:7], v28, s[30:31] offset:16
-; GFX9-NEXT:    global_load_dwordx4 v[8:11], v28, s[30:31] offset:32
-; GFX9-NEXT:    global_load_dwordx4 v[12:15], v28, s[30:31] offset:48
-; GFX9-NEXT:    global_load_dwordx4 v[16:19], v28, s[30:31] offset:64
-; GFX9-NEXT:    global_load_dwordx4 v[20:23], v28, s[30:31] offset:80
-; GFX9-NEXT:    global_load_dwordx4 v[24:27], v28, s[30:31] offset:96
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v28, s[34:35]
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v28, s[34:35] offset:16
+; GFX9-NEXT:    global_load_dwordx4 v[8:11], v28, s[34:35] offset:32
+; GFX9-NEXT:    global_load_dwordx4 v[12:15], v28, s[34:35] offset:48
+; GFX9-NEXT:    global_load_dwordx4 v[16:19], v28, s[34:35] offset:64
+; GFX9-NEXT:    global_load_dwordx4 v[20:23], v28, s[34:35] offset:80
+; GFX9-NEXT:    global_load_dwordx4 v[24:27], v28, s[34:35] offset:96
 ; GFX9-NEXT:    s_nop 0
-; GFX9-NEXT:    global_load_dwordx4 v[28:31], v28, s[30:31] offset:112
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    global_load_dwordx4 v[28:31], v28, s[34:35] offset:112
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -4003,30 +4001,29 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32() #0 {
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    v_mov_b32_e32 v32, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x7
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v32, s[30:31]
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v32, s[30:31] offset:16
-; GFX10-NEXT:    global_load_dwordx4 v[8:11], v32, s[30:31] offset:32
-; GFX10-NEXT:    global_load_dwordx4 v[12:15], v32, s[30:31] offset:48
-; GFX10-NEXT:    global_load_dwordx4 v[16:19], v32, s[30:31] offset:64
-; GFX10-NEXT:    global_load_dwordx4 v[20:23], v32, s[30:31] offset:80
-; GFX10-NEXT:    global_load_dwordx4 v[24:27], v32, s[30:31] offset:96
-; GFX10-NEXT:    global_load_dwordx4 v[28:31], v32, s[30:31] offset:112
-; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v32, s[34:35]
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v32, s[34:35] offset:16
+; GFX10-NEXT:    global_load_dwordx4 v[8:11], v32, s[34:35] offset:32
+; GFX10-NEXT:    global_load_dwordx4 v[12:15], v32, s[34:35] offset:48
+; GFX10-NEXT:    global_load_dwordx4 v[16:19], v32, s[34:35] offset:64
+; GFX10-NEXT:    global_load_dwordx4 v[20:23], v32, s[34:35] offset:80
+; GFX10-NEXT:    global_load_dwordx4 v[24:27], v32, s[34:35] offset:96
+; GFX10-NEXT:    global_load_dwordx4 v[28:31], v32, s[34:35] offset:112
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32@rel32@hi+12
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -4065,8 +4062,8 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_v32i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -4088,32 +4085,32 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32(i32) #0 {
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_mov_b32_e32 v28, 0
 ; GFX9-NEXT:    global_load_dword v32, v[0:1], off
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dwordx4 v[0:3], v28, s[30:31]
-; GFX9-NEXT:    global_load_dwordx4 v[4:7], v28, s[30:31] offset:16
-; GFX9-NEXT:    global_load_dwordx4 v[8:11], v28, s[30:31] offset:32
-; GFX9-NEXT:    global_load_dwordx4 v[12:15], v28, s[30:31] offset:48
-; GFX9-NEXT:    global_load_dwordx4 v[16:19], v28, s[30:31] offset:64
-; GFX9-NEXT:    global_load_dwordx4 v[20:23], v28, s[30:31] offset:80
-; GFX9-NEXT:    global_load_dwordx4 v[24:27], v28, s[30:31] offset:96
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v28, s[34:35]
+; GFX9-NEXT:    global_load_dwordx4 v[4:7], v28, s[34:35] offset:16
+; GFX9-NEXT:    global_load_dwordx4 v[8:11], v28, s[34:35] offset:32
+; GFX9-NEXT:    global_load_dwordx4 v[12:15], v28, s[34:35] offset:48
+; GFX9-NEXT:    global_load_dwordx4 v[16:19], v28, s[34:35] offset:64
+; GFX9-NEXT:    global_load_dwordx4 v[20:23], v28, s[34:35] offset:80
+; GFX9-NEXT:    global_load_dwordx4 v[24:27], v28, s[34:35] offset:96
 ; GFX9-NEXT:    s_nop 0
-; GFX9-NEXT:    global_load_dwordx4 v[28:31], v28, s[30:31] offset:112
+; GFX9-NEXT:    global_load_dwordx4 v[28:31], v28, s[34:35] offset:112
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32_i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32_i32@rel32@hi+12
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32_i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32_i32@rel32@hi+12
 ; GFX9-NEXT:    s_waitcnt vmcnt(8)
 ; GFX9-NEXT:    buffer_store_dword v32, off, s[0:3], s32
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -4130,33 +4127,32 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32(i32) #0 {
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    v_mov_b32_e32 v32, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX10-NEXT:    global_load_dword v33, v[0:1], off
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x7
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v32, s[30:31]
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v32, s[30:31] offset:16
-; GFX10-NEXT:    global_load_dwordx4 v[8:11], v32, s[30:31] offset:32
-; GFX10-NEXT:    global_load_dwordx4 v[12:15], v32, s[30:31] offset:48
-; GFX10-NEXT:    global_load_dwordx4 v[16:19], v32, s[30:31] offset:64
-; GFX10-NEXT:    global_load_dwordx4 v[20:23], v32, s[30:31] offset:80
-; GFX10-NEXT:    global_load_dwordx4 v[24:27], v32, s[30:31] offset:96
-; GFX10-NEXT:    global_load_dwordx4 v[28:31], v32, s[30:31] offset:112
-; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32_i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32_i32@rel32@hi+12
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v32, s[34:35]
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v32, s[34:35] offset:16
+; GFX10-NEXT:    global_load_dwordx4 v[8:11], v32, s[34:35] offset:32
+; GFX10-NEXT:    global_load_dwordx4 v[12:15], v32, s[34:35] offset:48
+; GFX10-NEXT:    global_load_dwordx4 v[16:19], v32, s[34:35] offset:64
+; GFX10-NEXT:    global_load_dwordx4 v[20:23], v32, s[34:35] offset:80
+; GFX10-NEXT:    global_load_dwordx4 v[24:27], v32, s[34:35] offset:96
+; GFX10-NEXT:    global_load_dwordx4 v[28:31], v32, s[34:35] offset:112
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32_i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32_i32@rel32@hi+12
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-NEXT:    s_waitcnt vmcnt(8)
 ; GFX10-NEXT:    buffer_store_dword v33, off, s[0:3], s32
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -4198,8 +4194,8 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    s_waitcnt vmcnt(8)
 ; GFX10-SCRATCH-NEXT:    scratch_store_dword off, v33, s32
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -4232,16 +4228,16 @@ define amdgpu_gfx void @test_call_external_i32_func_i32_imm(i32 addrspace(1)* %o
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 42
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX9-NEXT:    v_mov_b32_e32 v42, v1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_i32_func_i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_i32_func_i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_i32_func_i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_i32_func_i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    global_store_dword v[41:42], v0, off
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    buffer_load_dword v42, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -4267,18 +4263,18 @@ define amdgpu_gfx void @test_call_external_i32_func_i32_imm(i32 addrspace(1)* %o
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 42
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_mov_b32_e32 v42, v1
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_i32_func_i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_i32_func_i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_i32_func_i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_i32_func_i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    global_store_dword v[41:42], v0, off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    buffer_load_dword v42, off, s[0:3], s33
 ; GFX10-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:4
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -4315,8 +4311,8 @@ define amdgpu_gfx void @test_call_external_i32_func_i32_imm(i32 addrspace(1)* %o
 ; GFX10-SCRATCH-NEXT:    s_clause 0x1
 ; GFX10-SCRATCH-NEXT:    scratch_load_dword v42, off, s33
 ; GFX10-SCRATCH-NEXT:    scratch_load_dword v41, off, s33 offset:4
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -4337,22 +4333,22 @@ define amdgpu_gfx void @test_call_external_void_func_struct_i8_i32() #0 {
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_ubyte v0, v2, s[30:31]
-; GFX9-NEXT:    global_load_dword v1, v2, s[30:31] offset:4
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_struct_i8_i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_struct_i8_i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    global_load_ubyte v0, v2, s[34:35]
+; GFX9-NEXT:    global_load_dword v1, v2, s[34:35] offset:4
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_struct_i8_i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_struct_i8_i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -4369,24 +4365,23 @@ define amdgpu_gfx void @test_call_external_void_func_struct_i8_i32() #0 {
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    global_load_ubyte v0, v2, s[30:31]
-; GFX10-NEXT:    global_load_dword v1, v2, s[30:31] offset:4
-; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_struct_i8_i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_struct_i8_i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    global_load_ubyte v0, v2, s[34:35]
+; GFX10-NEXT:    global_load_dword v1, v2, s[34:35] offset:4
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_struct_i8_i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_struct_i8_i32@rel32@hi+12
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -4419,8 +4414,8 @@ define amdgpu_gfx void @test_call_external_void_func_struct_i8_i32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_struct_i8_i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -4452,12 +4447,12 @@ define amdgpu_gfx void @test_call_external_void_func_byval_struct_i8_i32() #0 {
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s33 offset:4
 ; GFX9-NEXT:    v_lshrrev_b32_e64 v0, 6, s33
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_byval_struct_i8_i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_byval_struct_i8_i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_byval_struct_i8_i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_byval_struct_i8_i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -4483,13 +4478,13 @@ define amdgpu_gfx void @test_call_external_void_func_byval_struct_i8_i32() #0 {
 ; GFX10-NEXT:    buffer_store_byte v0, off, s[0:3], s33
 ; GFX10-NEXT:    buffer_store_dword v1, off, s[0:3], s33 offset:4
 ; GFX10-NEXT:    v_lshrrev_b32_e64 v0, 5, s33
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_byval_struct_i8_i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_byval_struct_i8_i32@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_byval_struct_i8_i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_byval_struct_i8_i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -4521,8 +4516,8 @@ define amdgpu_gfx void @test_call_external_void_func_byval_struct_i8_i32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_byval_struct_i8_i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -4559,14 +4554,14 @@ define amdgpu_gfx void @test_call_external_void_func_sret_struct_i8_i32_byval_st
 ; GFX9-NEXT:    v_add_u32_e32 v0, 8, v0
 ; GFX9-NEXT:    v_lshrrev_b32_e64 v1, 6, s33
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    buffer_load_ubyte v0, off, s[0:3], s33 offset:8
 ; GFX9-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:12
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xf800
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
@@ -4598,17 +4593,17 @@ define amdgpu_gfx void @test_call_external_void_func_sret_struct_i8_i32_byval_st
 ; GFX10-NEXT:    v_lshrrev_b32_e64 v0, 5, s33
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_lshrrev_b32_e64 v1, 5, s33
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@hi+12
 ; GFX10-NEXT:    v_add_nc_u32_e32 v0, 8, v0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_sret_struct_i8_i32_byval_struct_i8_i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    buffer_load_ubyte v0, off, s[0:3], s33 offset:8
 ; GFX10-NEXT:    buffer_load_dword v1, off, s[0:3], s33 offset:12
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
@@ -4650,8 +4645,8 @@ define amdgpu_gfx void @test_call_external_void_func_sret_struct_i8_i32_byval_st
 ; GFX10-SCRATCH-NEXT:    s_clause 0x1
 ; GFX10-SCRATCH-NEXT:    scratch_load_ubyte v0, off, s33 offset:8
 ; GFX10-SCRATCH-NEXT:    scratch_load_dword v1, off, s33 offset:12
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_addk_i32 s32, 0xffe0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
@@ -4689,18 +4684,18 @@ define amdgpu_gfx void @test_call_external_void_func_v16i8() #0 {
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    global_load_dwordx4 v[0:3], v0, s[30:31]
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v16i8@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v16i8@rel32@hi+12
+; GFX9-NEXT:    global_load_dwordx4 v[0:3], v0, s[34:35]
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v16i8@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v16i8@rel32@hi+12
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v16, 8, v0
 ; GFX9-NEXT:    v_lshrrev_b32_e32 v17, 16, v0
@@ -4720,9 +4715,9 @@ define amdgpu_gfx void @test_call_external_void_func_v16i8() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v1, v16
 ; GFX9-NEXT:    v_mov_b32_e32 v2, v17
 ; GFX9-NEXT:    v_mov_b32_e32 v3, v18
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -4739,19 +4734,19 @@ define amdgpu_gfx void @test_call_external_void_func_v16i8() #0 {
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v0, s[30:31]
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v0, s[34:35]
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v16i8@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v16i8@rel32@hi+12
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v16i8@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v16i8@rel32@hi+12
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v16, 8, v0
 ; GFX10-NEXT:    v_lshrrev_b32_e32 v17, 16, v0
@@ -4771,9 +4766,9 @@ define amdgpu_gfx void @test_call_external_void_func_v16i8() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v1, v16
 ; GFX10-NEXT:    v_mov_b32_e32 v2, v17
 ; GFX10-NEXT:    v_mov_b32_e32 v3, v18
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -4824,8 +4819,8 @@ define amdgpu_gfx void @test_call_external_void_func_v16i8() #0 {
 ; GFX10-SCRATCH-NEXT:    v_mov_b32_e32 v2, v17
 ; GFX10-SCRATCH-NEXT:    v_mov_b32_e32 v3, v18
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -4847,88 +4842,91 @@ define void @tail_call_byval_align16(<32 x i32> %val, double %tmp) #0 {
 ; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
-; GFX9-NEXT:    v_writelane_b32 v40, s33, 30
+; GFX9-NEXT:    v_writelane_b32 v40, s33, 32
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    buffer_load_dword v32, off, s[0:3], s33 offset:20
 ; GFX9-NEXT:    buffer_load_dword v33, off, s[0:3], s33 offset:16
 ; GFX9-NEXT:    buffer_load_dword v31, off, s[0:3], s33
-; GFX9-NEXT:    v_writelane_b32 v40, s34, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s35, 1
-; GFX9-NEXT:    v_writelane_b32 v40, s36, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s37, 3
-; GFX9-NEXT:    v_writelane_b32 v40, s38, 4
-; GFX9-NEXT:    v_writelane_b32 v40, s39, 5
-; GFX9-NEXT:    v_writelane_b32 v40, s40, 6
-; GFX9-NEXT:    v_writelane_b32 v40, s41, 7
-; GFX9-NEXT:    v_writelane_b32 v40, s42, 8
-; GFX9-NEXT:    v_writelane_b32 v40, s43, 9
-; GFX9-NEXT:    v_writelane_b32 v40, s44, 10
-; GFX9-NEXT:    v_writelane_b32 v40, s45, 11
-; GFX9-NEXT:    v_writelane_b32 v40, s46, 12
-; GFX9-NEXT:    v_writelane_b32 v40, s47, 13
-; GFX9-NEXT:    v_writelane_b32 v40, s48, 14
-; GFX9-NEXT:    v_writelane_b32 v40, s49, 15
-; GFX9-NEXT:    v_writelane_b32 v40, s50, 16
-; GFX9-NEXT:    v_writelane_b32 v40, s51, 17
-; GFX9-NEXT:    v_writelane_b32 v40, s52, 18
-; GFX9-NEXT:    v_writelane_b32 v40, s53, 19
-; GFX9-NEXT:    v_writelane_b32 v40, s54, 20
-; GFX9-NEXT:    v_writelane_b32 v40, s55, 21
-; GFX9-NEXT:    v_writelane_b32 v40, s56, 22
-; GFX9-NEXT:    v_writelane_b32 v40, s57, 23
-; GFX9-NEXT:    v_writelane_b32 v40, s58, 24
-; GFX9-NEXT:    v_writelane_b32 v40, s59, 25
-; GFX9-NEXT:    v_writelane_b32 v40, s60, 26
-; GFX9-NEXT:    v_writelane_b32 v40, s61, 27
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    v_writelane_b32 v40, s34, 2
+; GFX9-NEXT:    v_writelane_b32 v40, s35, 3
+; GFX9-NEXT:    v_writelane_b32 v40, s36, 4
+; GFX9-NEXT:    v_writelane_b32 v40, s37, 5
+; GFX9-NEXT:    v_writelane_b32 v40, s38, 6
+; GFX9-NEXT:    v_writelane_b32 v40, s39, 7
+; GFX9-NEXT:    v_writelane_b32 v40, s40, 8
+; GFX9-NEXT:    v_writelane_b32 v40, s41, 9
+; GFX9-NEXT:    v_writelane_b32 v40, s42, 10
+; GFX9-NEXT:    v_writelane_b32 v40, s43, 11
+; GFX9-NEXT:    v_writelane_b32 v40, s44, 12
+; GFX9-NEXT:    v_writelane_b32 v40, s45, 13
+; GFX9-NEXT:    v_writelane_b32 v40, s46, 14
+; GFX9-NEXT:    v_writelane_b32 v40, s47, 15
+; GFX9-NEXT:    v_writelane_b32 v40, s48, 16
+; GFX9-NEXT:    v_writelane_b32 v40, s49, 17
+; GFX9-NEXT:    v_writelane_b32 v40, s50, 18
+; GFX9-NEXT:    v_writelane_b32 v40, s51, 19
+; GFX9-NEXT:    v_writelane_b32 v40, s52, 20
+; GFX9-NEXT:    v_writelane_b32 v40, s53, 21
+; GFX9-NEXT:    v_writelane_b32 v40, s54, 22
+; GFX9-NEXT:    v_writelane_b32 v40, s55, 23
+; GFX9-NEXT:    v_writelane_b32 v40, s56, 24
+; GFX9-NEXT:    v_writelane_b32 v40, s57, 25
+; GFX9-NEXT:    v_writelane_b32 v40, s58, 26
+; GFX9-NEXT:    v_writelane_b32 v40, s59, 27
+; GFX9-NEXT:    v_writelane_b32 v40, s60, 28
+; GFX9-NEXT:    v_writelane_b32 v40, s61, 29
 ; GFX9-NEXT:    s_addk_i32 s32, 0x800
-; GFX9-NEXT:    v_writelane_b32 v40, s62, 28
-; GFX9-NEXT:    v_writelane_b32 v40, s63, 29
-; GFX9-NEXT:    s_mov_b64 s[4:5], s[30:31]
-; GFX9-NEXT:    s_getpc_b64 s[6:7]
-; GFX9-NEXT:    s_add_u32 s6, s6, byval_align16_f64_arg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s7, s7, byval_align16_f64_arg@rel32@hi+12
+; GFX9-NEXT:    v_writelane_b32 v40, s62, 30
+; GFX9-NEXT:    v_writelane_b32 v40, s63, 31
+; GFX9-NEXT:    s_getpc_b64 s[4:5]
+; GFX9-NEXT:    s_add_u32 s4, s4, byval_align16_f64_arg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s5, s5, byval_align16_f64_arg@rel32@hi+12
 ; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    buffer_store_dword v32, off, s[0:3], s32 offset:4
 ; GFX9-NEXT:    s_waitcnt vmcnt(2)
 ; GFX9-NEXT:    buffer_store_dword v33, off, s[0:3], s32
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT:    v_readlane_b32 s63, v40, 29
-; GFX9-NEXT:    v_readlane_b32 s62, v40, 28
-; GFX9-NEXT:    v_readlane_b32 s61, v40, 27
-; GFX9-NEXT:    v_readlane_b32 s60, v40, 26
-; GFX9-NEXT:    v_readlane_b32 s59, v40, 25
-; GFX9-NEXT:    v_readlane_b32 s58, v40, 24
-; GFX9-NEXT:    v_readlane_b32 s57, v40, 23
-; GFX9-NEXT:    v_readlane_b32 s56, v40, 22
-; GFX9-NEXT:    v_readlane_b32 s55, v40, 21
-; GFX9-NEXT:    v_readlane_b32 s54, v40, 20
-; GFX9-NEXT:    v_readlane_b32 s53, v40, 19
-; GFX9-NEXT:    v_readlane_b32 s52, v40, 18
-; GFX9-NEXT:    v_readlane_b32 s51, v40, 17
-; GFX9-NEXT:    v_readlane_b32 s50, v40, 16
-; GFX9-NEXT:    v_readlane_b32 s49, v40, 15
-; GFX9-NEXT:    v_readlane_b32 s48, v40, 14
-; GFX9-NEXT:    v_readlane_b32 s47, v40, 13
-; GFX9-NEXT:    v_readlane_b32 s46, v40, 12
-; GFX9-NEXT:    v_readlane_b32 s45, v40, 11
-; GFX9-NEXT:    v_readlane_b32 s44, v40, 10
-; GFX9-NEXT:    v_readlane_b32 s43, v40, 9
-; GFX9-NEXT:    v_readlane_b32 s42, v40, 8
-; GFX9-NEXT:    v_readlane_b32 s41, v40, 7
-; GFX9-NEXT:    v_readlane_b32 s40, v40, 6
-; GFX9-NEXT:    v_readlane_b32 s39, v40, 5
-; GFX9-NEXT:    v_readlane_b32 s38, v40, 4
-; GFX9-NEXT:    v_readlane_b32 s37, v40, 3
-; GFX9-NEXT:    v_readlane_b32 s36, v40, 2
-; GFX9-NEXT:    v_readlane_b32 s35, v40, 1
-; GFX9-NEXT:    v_readlane_b32 s34, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GFX9-NEXT:    v_readlane_b32 s63, v40, 31
+; GFX9-NEXT:    v_readlane_b32 s62, v40, 30
+; GFX9-NEXT:    v_readlane_b32 s61, v40, 29
+; GFX9-NEXT:    v_readlane_b32 s60, v40, 28
+; GFX9-NEXT:    v_readlane_b32 s59, v40, 27
+; GFX9-NEXT:    v_readlane_b32 s58, v40, 26
+; GFX9-NEXT:    v_readlane_b32 s57, v40, 25
+; GFX9-NEXT:    v_readlane_b32 s56, v40, 24
+; GFX9-NEXT:    v_readlane_b32 s55, v40, 23
+; GFX9-NEXT:    v_readlane_b32 s54, v40, 22
+; GFX9-NEXT:    v_readlane_b32 s53, v40, 21
+; GFX9-NEXT:    v_readlane_b32 s52, v40, 20
+; GFX9-NEXT:    v_readlane_b32 s51, v40, 19
+; GFX9-NEXT:    v_readlane_b32 s50, v40, 18
+; GFX9-NEXT:    v_readlane_b32 s49, v40, 17
+; GFX9-NEXT:    v_readlane_b32 s48, v40, 16
+; GFX9-NEXT:    v_readlane_b32 s47, v40, 15
+; GFX9-NEXT:    v_readlane_b32 s46, v40, 14
+; GFX9-NEXT:    v_readlane_b32 s45, v40, 13
+; GFX9-NEXT:    v_readlane_b32 s44, v40, 12
+; GFX9-NEXT:    v_readlane_b32 s43, v40, 11
+; GFX9-NEXT:    v_readlane_b32 s42, v40, 10
+; GFX9-NEXT:    v_readlane_b32 s41, v40, 9
+; GFX9-NEXT:    v_readlane_b32 s40, v40, 8
+; GFX9-NEXT:    v_readlane_b32 s39, v40, 7
+; GFX9-NEXT:    v_readlane_b32 s38, v40, 6
+; GFX9-NEXT:    v_readlane_b32 s37, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s36, v40, 4
+; GFX9-NEXT:    v_readlane_b32 s35, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s34, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xf800
-; GFX9-NEXT:    v_readlane_b32 s33, v40, 30
-; GFX9-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GFX9-NEXT:    v_readlane_b32 s33, v40, 32
+; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GFX9-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
-; GFX9-NEXT:    s_mov_b64 exec, s[6:7]
+; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    s_setpc_b64 s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: tail_call_byval_align16:
 ; GFX10:       ; %bb.0: ; %entry
@@ -4938,90 +4936,93 @@ define void @tail_call_byval_align16(<32 x i32> %val, double %tmp) #0 {
 ; GFX10-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s4
-; GFX10-NEXT:    v_writelane_b32 v40, s33, 30
+; GFX10-NEXT:    s_mov_b32 s6, s33
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_clause 0x2
 ; GFX10-NEXT:    buffer_load_dword v32, off, s[0:3], s33 offset:20
 ; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s33 offset:16
 ; GFX10-NEXT:    buffer_load_dword v31, off, s[0:3], s33
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0x400
-; GFX10-NEXT:    v_writelane_b32 v40, s34, 0
-; GFX10-NEXT:    s_mov_b64 s[4:5], s[30:31]
-; GFX10-NEXT:    s_getpc_b64 s[6:7]
-; GFX10-NEXT:    s_add_u32 s6, s6, byval_align16_f64_arg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s7, s7, byval_align16_f64_arg@rel32@hi+12
+; GFX10-NEXT:    s_getpc_b64 s[4:5]
+; GFX10-NEXT:    s_add_u32 s4, s4, byval_align16_f64_arg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s5, s5, byval_align16_f64_arg@rel32@hi+12
 ; GFX10-NEXT:    s_waitcnt vmcnt(2)
 ; GFX10-NEXT:    buffer_store_dword v32, off, s[0:3], s32 offset:4
 ; GFX10-NEXT:    s_waitcnt vmcnt(1)
 ; GFX10-NEXT:    buffer_store_dword v33, off, s[0:3], s32
-; GFX10-NEXT:    v_writelane_b32 v40, s35, 1
-; GFX10-NEXT:    v_writelane_b32 v40, s36, 2
-; GFX10-NEXT:    v_writelane_b32 v40, s37, 3
-; GFX10-NEXT:    v_writelane_b32 v40, s38, 4
-; GFX10-NEXT:    v_writelane_b32 v40, s39, 5
-; GFX10-NEXT:    v_writelane_b32 v40, s40, 6
-; GFX10-NEXT:    v_writelane_b32 v40, s41, 7
-; GFX10-NEXT:    v_writelane_b32 v40, s42, 8
-; GFX10-NEXT:    v_writelane_b32 v40, s43, 9
-; GFX10-NEXT:    v_writelane_b32 v40, s44, 10
-; GFX10-NEXT:    v_writelane_b32 v40, s45, 11
-; GFX10-NEXT:    v_writelane_b32 v40, s46, 12
-; GFX10-NEXT:    v_writelane_b32 v40, s47, 13
-; GFX10-NEXT:    v_writelane_b32 v40, s48, 14
-; GFX10-NEXT:    v_writelane_b32 v40, s49, 15
-; GFX10-NEXT:    v_writelane_b32 v40, s50, 16
-; GFX10-NEXT:    v_writelane_b32 v40, s51, 17
-; GFX10-NEXT:    v_writelane_b32 v40, s52, 18
-; GFX10-NEXT:    v_writelane_b32 v40, s53, 19
-; GFX10-NEXT:    v_writelane_b32 v40, s54, 20
-; GFX10-NEXT:    v_writelane_b32 v40, s55, 21
-; GFX10-NEXT:    v_writelane_b32 v40, s56, 22
-; GFX10-NEXT:    v_writelane_b32 v40, s57, 23
-; GFX10-NEXT:    v_writelane_b32 v40, s58, 24
-; GFX10-NEXT:    v_writelane_b32 v40, s59, 25
-; GFX10-NEXT:    v_writelane_b32 v40, s60, 26
-; GFX10-NEXT:    v_writelane_b32 v40, s61, 27
-; GFX10-NEXT:    v_writelane_b32 v40, s62, 28
-; GFX10-NEXT:    v_writelane_b32 v40, s63, 29
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[6:7]
-; GFX10-NEXT:    v_readlane_b32 s63, v40, 29
-; GFX10-NEXT:    v_readlane_b32 s62, v40, 28
-; GFX10-NEXT:    v_readlane_b32 s61, v40, 27
-; GFX10-NEXT:    v_readlane_b32 s60, v40, 26
-; GFX10-NEXT:    v_readlane_b32 s59, v40, 25
-; GFX10-NEXT:    v_readlane_b32 s58, v40, 24
-; GFX10-NEXT:    v_readlane_b32 s57, v40, 23
-; GFX10-NEXT:    v_readlane_b32 s56, v40, 22
-; GFX10-NEXT:    v_readlane_b32 s55, v40, 21
-; GFX10-NEXT:    v_readlane_b32 s54, v40, 20
-; GFX10-NEXT:    v_readlane_b32 s53, v40, 19
-; GFX10-NEXT:    v_readlane_b32 s52, v40, 18
-; GFX10-NEXT:    v_readlane_b32 s51, v40, 17
-; GFX10-NEXT:    v_readlane_b32 s50, v40, 16
-; GFX10-NEXT:    v_readlane_b32 s49, v40, 15
-; GFX10-NEXT:    v_readlane_b32 s48, v40, 14
-; GFX10-NEXT:    v_readlane_b32 s47, v40, 13
-; GFX10-NEXT:    v_readlane_b32 s46, v40, 12
-; GFX10-NEXT:    v_readlane_b32 s45, v40, 11
-; GFX10-NEXT:    v_readlane_b32 s44, v40, 10
-; GFX10-NEXT:    v_readlane_b32 s43, v40, 9
-; GFX10-NEXT:    v_readlane_b32 s42, v40, 8
-; GFX10-NEXT:    v_readlane_b32 s41, v40, 7
-; GFX10-NEXT:    v_readlane_b32 s40, v40, 6
-; GFX10-NEXT:    v_readlane_b32 s39, v40, 5
-; GFX10-NEXT:    v_readlane_b32 s38, v40, 4
-; GFX10-NEXT:    v_readlane_b32 s37, v40, 3
-; GFX10-NEXT:    v_readlane_b32 s36, v40, 2
-; GFX10-NEXT:    v_readlane_b32 s35, v40, 1
-; GFX10-NEXT:    v_readlane_b32 s34, v40, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX10-NEXT:    v_writelane_b32 v40, s34, 2
+; GFX10-NEXT:    v_writelane_b32 v40, s35, 3
+; GFX10-NEXT:    v_writelane_b32 v40, s36, 4
+; GFX10-NEXT:    v_writelane_b32 v40, s37, 5
+; GFX10-NEXT:    v_writelane_b32 v40, s38, 6
+; GFX10-NEXT:    v_writelane_b32 v40, s39, 7
+; GFX10-NEXT:    v_writelane_b32 v40, s40, 8
+; GFX10-NEXT:    v_writelane_b32 v40, s41, 9
+; GFX10-NEXT:    v_writelane_b32 v40, s42, 10
+; GFX10-NEXT:    v_writelane_b32 v40, s43, 11
+; GFX10-NEXT:    v_writelane_b32 v40, s44, 12
+; GFX10-NEXT:    v_writelane_b32 v40, s45, 13
+; GFX10-NEXT:    v_writelane_b32 v40, s46, 14
+; GFX10-NEXT:    v_writelane_b32 v40, s47, 15
+; GFX10-NEXT:    v_writelane_b32 v40, s48, 16
+; GFX10-NEXT:    v_writelane_b32 v40, s49, 17
+; GFX10-NEXT:    v_writelane_b32 v40, s50, 18
+; GFX10-NEXT:    v_writelane_b32 v40, s51, 19
+; GFX10-NEXT:    v_writelane_b32 v40, s52, 20
+; GFX10-NEXT:    v_writelane_b32 v40, s53, 21
+; GFX10-NEXT:    v_writelane_b32 v40, s54, 22
+; GFX10-NEXT:    v_writelane_b32 v40, s55, 23
+; GFX10-NEXT:    v_writelane_b32 v40, s56, 24
+; GFX10-NEXT:    v_writelane_b32 v40, s57, 25
+; GFX10-NEXT:    v_writelane_b32 v40, s58, 26
+; GFX10-NEXT:    v_writelane_b32 v40, s59, 27
+; GFX10-NEXT:    v_writelane_b32 v40, s60, 28
+; GFX10-NEXT:    v_writelane_b32 v40, s61, 29
+; GFX10-NEXT:    v_writelane_b32 v40, s62, 30
+; GFX10-NEXT:    v_writelane_b32 v40, s63, 31
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GFX10-NEXT:    v_readlane_b32 s63, v40, 31
+; GFX10-NEXT:    v_readlane_b32 s62, v40, 30
+; GFX10-NEXT:    v_readlane_b32 s61, v40, 29
+; GFX10-NEXT:    v_readlane_b32 s60, v40, 28
+; GFX10-NEXT:    v_readlane_b32 s59, v40, 27
+; GFX10-NEXT:    v_readlane_b32 s58, v40, 26
+; GFX10-NEXT:    v_readlane_b32 s57, v40, 25
+; GFX10-NEXT:    v_readlane_b32 s56, v40, 24
+; GFX10-NEXT:    v_readlane_b32 s55, v40, 23
+; GFX10-NEXT:    v_readlane_b32 s54, v40, 22
+; GFX10-NEXT:    v_readlane_b32 s53, v40, 21
+; GFX10-NEXT:    v_readlane_b32 s52, v40, 20
+; GFX10-NEXT:    v_readlane_b32 s51, v40, 19
+; GFX10-NEXT:    v_readlane_b32 s50, v40, 18
+; GFX10-NEXT:    v_readlane_b32 s49, v40, 17
+; GFX10-NEXT:    v_readlane_b32 s48, v40, 16
+; GFX10-NEXT:    v_readlane_b32 s47, v40, 15
+; GFX10-NEXT:    v_readlane_b32 s46, v40, 14
+; GFX10-NEXT:    v_readlane_b32 s45, v40, 13
+; GFX10-NEXT:    v_readlane_b32 s44, v40, 12
+; GFX10-NEXT:    v_readlane_b32 s43, v40, 11
+; GFX10-NEXT:    v_readlane_b32 s42, v40, 10
+; GFX10-NEXT:    v_readlane_b32 s41, v40, 9
+; GFX10-NEXT:    v_readlane_b32 s40, v40, 8
+; GFX10-NEXT:    v_readlane_b32 s39, v40, 7
+; GFX10-NEXT:    v_readlane_b32 s38, v40, 6
+; GFX10-NEXT:    v_readlane_b32 s37, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s36, v40, 4
+; GFX10-NEXT:    v_readlane_b32 s35, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s34, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfc00
-; GFX10-NEXT:    v_readlane_b32 s33, v40, 30
-; GFX10-NEXT:    s_or_saveexec_b32 s6, -1
+; GFX10-NEXT:    s_mov_b32 s33, s6
+; GFX10-NEXT:    s_or_saveexec_b32 s4, -1
 ; GFX10-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_mov_b32 exec_lo, s6
+; GFX10-NEXT:    s_mov_b32 exec_lo, s4
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    s_setpc_b64 s[4:5]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-SCRATCH-LABEL: tail_call_byval_align16:
 ; GFX10-SCRATCH:       ; %bb.0: ; %entry
@@ -5031,87 +5032,90 @@ define void @tail_call_byval_align16(<32 x i32> %val, double %tmp) #0 {
 ; GFX10-SCRATCH-NEXT:    scratch_store_dword off, v40, s32 offset:24 ; 4-byte Folded Spill
 ; GFX10-SCRATCH-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-SCRATCH-NEXT:    s_mov_b32 exec_lo, s0
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s33, 30
+; GFX10-SCRATCH-NEXT:    s_mov_b32 s4, s33
 ; GFX10-SCRATCH-NEXT:    s_mov_b32 s33, s32
 ; GFX10-SCRATCH-NEXT:    s_clause 0x1
 ; GFX10-SCRATCH-NEXT:    scratch_load_dwordx2 v[32:33], off, s33 offset:16
 ; GFX10-SCRATCH-NEXT:    scratch_load_dword v31, off, s33
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, 32
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s34, 0
-; GFX10-SCRATCH-NEXT:    s_mov_b64 s[4:5], s[30:31]
 ; GFX10-SCRATCH-NEXT:    s_getpc_b64 s[0:1]
 ; GFX10-SCRATCH-NEXT:    s_add_u32 s0, s0, byval_align16_f64_arg@rel32@lo+4
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, byval_align16_f64_arg@rel32@hi+12
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s35, 1
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s36, 2
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s37, 3
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s38, 4
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s39, 5
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s40, 6
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s41, 7
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s42, 8
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s43, 9
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s44, 10
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s45, 11
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s46, 12
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s47, 13
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s48, 14
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s49, 15
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s50, 16
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s51, 17
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s52, 18
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s53, 19
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s54, 20
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s55, 21
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s56, 22
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s57, 23
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s58, 24
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s59, 25
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s60, 26
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s61, 27
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s62, 28
-; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s63, 29
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s34, 2
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s35, 3
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s36, 4
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s37, 5
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s38, 6
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s39, 7
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s40, 8
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s41, 9
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s42, 10
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s43, 11
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s44, 12
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s45, 13
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s46, 14
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s47, 15
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s48, 16
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s49, 17
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s50, 18
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s51, 19
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s52, 20
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s53, 21
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s54, 22
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s55, 23
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s56, 24
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s57, 25
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s58, 26
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s59, 27
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s60, 28
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s61, 29
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s62, 30
+; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s63, 31
 ; GFX10-SCRATCH-NEXT:    s_waitcnt vmcnt(1)
 ; GFX10-SCRATCH-NEXT:    scratch_store_dwordx2 off, v[32:33], s32
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s63, v40, 29
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s62, v40, 28
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s61, v40, 27
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s60, v40, 26
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s59, v40, 25
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s58, v40, 24
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s57, v40, 23
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s56, v40, 22
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s55, v40, 21
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s54, v40, 20
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s53, v40, 19
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s52, v40, 18
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s51, v40, 17
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s50, v40, 16
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s49, v40, 15
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s48, v40, 14
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s47, v40, 13
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s46, v40, 12
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s45, v40, 11
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s44, v40, 10
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s43, v40, 9
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s42, v40, 8
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s41, v40, 7
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s40, v40, 6
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s39, v40, 5
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s38, v40, 4
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s37, v40, 3
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s36, v40, 2
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s35, v40, 1
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s34, v40, 0
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s63, v40, 31
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s62, v40, 30
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s61, v40, 29
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s60, v40, 28
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s59, v40, 27
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s58, v40, 26
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s57, v40, 25
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s56, v40, 24
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s55, v40, 23
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s54, v40, 22
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s53, v40, 21
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s52, v40, 20
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s51, v40, 19
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s50, v40, 18
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s49, v40, 17
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s48, v40, 16
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s47, v40, 15
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s46, v40, 14
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s45, v40, 13
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s44, v40, 12
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s43, v40, 11
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s42, v40, 10
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s41, v40, 9
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s40, v40, 8
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s39, v40, 7
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s38, v40, 6
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s37, v40, 5
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s36, v40, 4
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s35, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s34, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_addk_i32 s32, 0xffe0
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 30
+; GFX10-SCRATCH-NEXT:    s_mov_b32 s33, s4
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
 ; GFX10-SCRATCH-NEXT:    scratch_load_dword v40, off, s32 offset:24 ; 4-byte Folded Reload
 ; GFX10-SCRATCH-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-SCRATCH-NEXT:    s_mov_b32 exec_lo, s0
 ; GFX10-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-SCRATCH-NEXT:    s_setpc_b64 s[4:5]
+; GFX10-SCRATCH-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %alloca = alloca double, align 8, addrspace(5)
   tail call amdgpu_gfx void @byval_align16_f64_arg(<32 x i32> %val, double addrspace(5)* byval(double) align 16 %alloca)
@@ -5132,13 +5136,13 @@ define amdgpu_gfx void @test_call_external_void_func_i1_imm_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i1_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i1_inreg@rel32@hi+12
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i1_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i1_inreg@rel32@hi+12
 ; GFX9-NEXT:    buffer_store_byte v0, off, s[0:3], s32
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -5159,15 +5163,15 @@ define amdgpu_gfx void @test_call_external_void_func_i1_imm_inreg() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 1
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i1_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i1_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    buffer_store_byte v0, off, s[0:3], s32
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i1_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i1_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -5196,8 +5200,8 @@ define amdgpu_gfx void @test_call_external_void_func_i1_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    scratch_store_byte off, v0, s32
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -5224,12 +5228,12 @@ define amdgpu_gfx void @test_call_external_void_func_i8_imm_inreg(i32) #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7b
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i8_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i8_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i8_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i8_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5250,16 +5254,16 @@ define amdgpu_gfx void @test_call_external_void_func_i8_imm_inreg(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i8_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i8_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_movk_i32 s4, 0x7b
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i8_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i8_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5289,8 +5293,8 @@ define amdgpu_gfx void @test_call_external_void_func_i8_imm_inreg(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5318,12 +5322,12 @@ define amdgpu_gfx void @test_call_external_void_func_i16_imm_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7b
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5344,16 +5348,16 @@ define amdgpu_gfx void @test_call_external_void_func_i16_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_movk_i32 s4, 0x7b
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5383,8 +5387,8 @@ define amdgpu_gfx void @test_call_external_void_func_i16_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5412,12 +5416,12 @@ define amdgpu_gfx void @test_call_external_void_func_i32_imm_inreg(i32) #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    s_mov_b32 s4, 42
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5438,16 +5442,16 @@ define amdgpu_gfx void @test_call_external_void_func_i32_imm_inreg(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 42
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5477,8 +5481,8 @@ define amdgpu_gfx void @test_call_external_void_func_i32_imm_inreg(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 3
@@ -5508,12 +5512,12 @@ define amdgpu_gfx void @test_call_external_void_func_i64_imm_inreg() #0 {
 ; GFX9-NEXT:    s_movk_i32 s4, 0x7b
 ; GFX9-NEXT:    s_mov_b32 s5, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_i64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_i64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_i64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_i64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -5535,18 +5539,18 @@ define amdgpu_gfx void @test_call_external_void_func_i64_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_i64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_i64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_movk_i32 s4, 0x7b
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    s_mov_b32 s5, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_i64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_i64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -5579,8 +5583,8 @@ define amdgpu_gfx void @test_call_external_void_func_i64_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -5606,19 +5610,19 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s6, 2
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s7, 3
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 4
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX9-NEXT:    s_mov_b64 s[30:31], 0
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 4
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 5
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
@@ -5640,22 +5644,22 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 6
+; GFX10-NEXT:    s_mov_b64 s[34:35], 0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s6, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s7, 3
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX10-NEXT:    s_mov_b64 s[30:31], 0
-; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
@@ -5692,8 +5696,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 5
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
@@ -5731,12 +5735,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s6, 3
 ; GFX9-NEXT:    s_mov_b32 s7, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
@@ -5760,6 +5764,9 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 6
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -5770,12 +5777,9 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s7, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
@@ -5814,8 +5818,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i64_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 5
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
@@ -5843,23 +5847,23 @@ define amdgpu_gfx void @test_call_external_void_func_v3i64_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s6, 2
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s7, 3
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s8, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s9, 5
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 6
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 7
-; GFX9-NEXT:    s_mov_b64 s[30:31], 0
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 6
 ; GFX9-NEXT:    s_mov_b32 s8, 1
 ; GFX9-NEXT:    s_mov_b32 s9, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 6
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 7
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 7
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX9-NEXT:    v_readlane_b32 s9, v40, 5
 ; GFX9-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
@@ -5883,26 +5887,26 @@ define amdgpu_gfx void @test_call_external_void_func_v3i64_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 8
+; GFX10-NEXT:    s_mov_b64 s[34:35], 0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s6, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s7, 3
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s8, 4
 ; GFX10-NEXT:    s_mov_b32 s8, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s9, 5
 ; GFX10-NEXT:    s_mov_b32 s9, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 6
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 7
-; GFX10-NEXT:    s_mov_b64 s[30:31], 0
-; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 6
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 7
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX10-NEXT:    v_readlane_b32 s9, v40, 5
 ; GFX10-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
@@ -5945,8 +5949,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i64_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 6
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 7
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 7
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s9, v40, 5
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
@@ -5980,26 +5984,26 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s6, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s7, 3
+; GFX9-NEXT:    s_mov_b64 s[34:35], 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s8, 4
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s9, 5
 ; GFX9-NEXT:    v_writelane_b32 v40, s10, 6
 ; GFX9-NEXT:    v_writelane_b32 v40, s11, 7
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 8
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 9
-; GFX9-NEXT:    s_mov_b64 s[30:31], 0
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX9-NEXT:    s_mov_b32 s8, 1
 ; GFX9-NEXT:    s_mov_b32 s9, 2
 ; GFX9-NEXT:    s_mov_b32 s10, 3
 ; GFX9-NEXT:    s_mov_b32 s11, 4
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 8
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 9
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX9-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX9-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX9-NEXT:    v_readlane_b32 s9, v40, 5
@@ -6025,12 +6029,17 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 10
+; GFX10-NEXT:    s_mov_b64 s[34:35], 0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s6, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s7, 3
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s8, 4
 ; GFX10-NEXT:    s_mov_b32 s8, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s9, 5
@@ -6041,14 +6050,9 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s11, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 9
-; GFX10-NEXT:    s_mov_b64 s[30:31], 0
-; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 8
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX10-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX10-NEXT:    v_readlane_b32 s9, v40, 5
@@ -6097,8 +6101,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i64_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 9
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s9, v40, 5
@@ -6135,12 +6139,12 @@ define amdgpu_gfx void @test_call_external_void_func_f16_imm_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    s_movk_i32 s4, 0x4400
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_f16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_f16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_f16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_f16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -6161,16 +6165,16 @@ define amdgpu_gfx void @test_call_external_void_func_f16_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_f16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_f16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_movk_i32 s4, 0x4400
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_f16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_f16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -6200,8 +6204,8 @@ define amdgpu_gfx void @test_call_external_void_func_f16_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 3
@@ -6229,12 +6233,12 @@ define amdgpu_gfx void @test_call_external_void_func_f32_imm_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    s_mov_b32 s4, 4.0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_f32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_f32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_f32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_f32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -6255,16 +6259,16 @@ define amdgpu_gfx void @test_call_external_void_func_f32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_f32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_f32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 4.0
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_f32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_f32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -6294,8 +6298,8 @@ define amdgpu_gfx void @test_call_external_void_func_f32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 3
@@ -6325,12 +6329,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2f32_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s4, 1.0
 ; GFX9-NEXT:    s_mov_b32 s5, 2.0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2f32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2f32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -6352,18 +6356,18 @@ define amdgpu_gfx void @test_call_external_void_func_v2f32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2f32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1.0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    s_mov_b32 s5, 2.0
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2f32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -6396,8 +6400,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2f32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -6430,12 +6434,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s5, 2.0
 ; GFX9-NEXT:    s_mov_b32 s6, 4.0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 4
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 3
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 4
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
@@ -6458,6 +6462,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 5
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1.0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -6466,12 +6473,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s6, 4.0
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 3
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 4
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 3
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 4
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
@@ -6507,8 +6511,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 3
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 4
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 4
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
@@ -6546,12 +6550,12 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s7, -1.0
 ; GFX9-NEXT:    s_mov_b32 s8, 0.5
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 6
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v5f32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v5f32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 5
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v5f32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v5f32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 6
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX9-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
@@ -6576,6 +6580,9 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 7
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v5f32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v5f32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1.0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -6588,12 +6595,9 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s8, 0.5
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 5
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 6
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v5f32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v5f32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 5
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 6
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX10-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
@@ -6635,8 +6639,8 @@ define amdgpu_gfx void @test_call_external_void_func_v5f32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 5
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 6
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 6
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
@@ -6670,12 +6674,12 @@ define amdgpu_gfx void @test_call_external_void_func_f64_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s4, 0
 ; GFX9-NEXT:    s_mov_b32 s5, 0x40100000
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_f64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_f64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_f64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_f64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -6697,18 +6701,18 @@ define amdgpu_gfx void @test_call_external_void_func_f64_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_f64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_f64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    s_mov_b32 s5, 0x40100000
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_f64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_f64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -6741,8 +6745,8 @@ define amdgpu_gfx void @test_call_external_void_func_f64_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -6777,12 +6781,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s6, 0
 ; GFX9-NEXT:    s_mov_b32 s7, 0x40100000
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2f64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2f64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
@@ -6806,6 +6810,9 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 6
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2f64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -6816,12 +6823,9 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s7, 0x40100000
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2f64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
@@ -6860,8 +6864,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2f64_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 5
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
@@ -6902,12 +6906,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s8, 0
 ; GFX9-NEXT:    s_mov_b32 s9, 0x40200000
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 7
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f64_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f64_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 6
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f64_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f64_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 7
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX9-NEXT:    v_readlane_b32 s9, v40, 5
 ; GFX9-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
@@ -6933,6 +6937,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 8
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f64_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f64_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -6947,12 +6954,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s9, 0x40200000
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 6
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 7
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f64_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f64_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 6
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 7
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX10-NEXT:    v_readlane_b32 s9, v40, 5
 ; GFX10-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
@@ -6997,8 +7001,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f64_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 6
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 7
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 7
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 6
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s9, v40, 5
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
@@ -7026,17 +7030,17 @@ define amdgpu_gfx void @test_call_external_void_func_v2i16_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
-; GFX9-NEXT:    s_load_dword s4, s[30:31], 0x0
+; GFX9-NEXT:    s_load_dword s4, s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -7058,15 +7062,15 @@ define amdgpu_gfx void @test_call_external_void_func_v2i16_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
+; GFX10-NEXT:    s_load_dword s4, s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
-; GFX10-NEXT:    s_load_dword s4, s[30:31], 0x0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -7096,8 +7100,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i16_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 3
@@ -7122,17 +7126,17 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7156,15 +7160,15 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_inreg() #0 {
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
+; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -7196,8 +7200,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -7223,17 +7227,17 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7257,15 +7261,15 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_inreg() #0 {
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
+; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -7297,8 +7301,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -7330,12 +7334,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s4, 0x20001
 ; GFX9-NEXT:    s_mov_b32 s5, 3
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7357,18 +7361,18 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 0x20001
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    s_mov_b32 s5, 3
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -7401,8 +7405,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i16_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -7433,12 +7437,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s4, 0x40003c00
 ; GFX9-NEXT:    s_movk_i32 s5, 0x4400
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7460,18 +7464,18 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3f16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3f16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 0x40003c00
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    s_movk_i32 s5, 0x4400
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3f16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3f16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -7504,8 +7508,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3f16_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -7530,17 +7534,17 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7564,15 +7568,15 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_inreg() #0 {
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
+; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -7604,8 +7608,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -7637,12 +7641,12 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s4, 0x20001
 ; GFX9-NEXT:    s_mov_b32 s5, 0x40003
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7664,18 +7668,18 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 0x20001
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    s_mov_b32 s5, 0x40003
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -7708,8 +7712,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i16_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -7733,17 +7737,17 @@ define amdgpu_gfx void @test_call_external_void_func_v2f16_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
-; GFX9-NEXT:    s_load_dword s4, s[30:31], 0x0
+; GFX9-NEXT:    s_load_dword s4, s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2f16_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f16_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2f16_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f16_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -7765,15 +7769,15 @@ define amdgpu_gfx void @test_call_external_void_func_v2f16_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
+; GFX10-NEXT:    s_load_dword s4, s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2f16_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2f16_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
-; GFX10-NEXT:    s_load_dword s4, s[30:31], 0x0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2f16_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2f16_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -7803,8 +7807,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2f16_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 3
@@ -7829,17 +7833,17 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7863,15 +7867,15 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_inreg() #0 {
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
+; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
-; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[30:31], 0x0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -7903,8 +7907,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -7936,12 +7940,12 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s4, 1
 ; GFX9-NEXT:    s_mov_b32 s5, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -7963,18 +7967,18 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 4
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v2i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v2i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    s_mov_b32 s5, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 3
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v2i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v2i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -8007,8 +8011,8 @@ define amdgpu_gfx void @test_call_external_void_func_v2i32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
@@ -8041,12 +8045,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm_inreg(i32) #0 {
 ; GFX9-NEXT:    s_mov_b32 s5, 4
 ; GFX9-NEXT:    s_mov_b32 s6, 5
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 4
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 3
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 4
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
@@ -8069,6 +8073,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm_inreg(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 5
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 3
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -8077,12 +8084,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm_inreg(i32) #0 {
 ; GFX10-NEXT:    s_mov_b32 s6, 5
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 3
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 4
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 3
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 4
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
@@ -8118,8 +8122,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_imm_inreg(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 3
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 4
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 4
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s4, v40, 0
@@ -8155,12 +8159,12 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32_inreg(i32) #0 {
 ; GFX9-NEXT:    s_mov_b32 s6, 5
 ; GFX9-NEXT:    s_mov_b32 s7, 6
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32_i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32_i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32_i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32_i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8184,6 +8188,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32_inreg(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 6
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v3i32_i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v3i32_i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 3
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -8194,12 +8201,9 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32_inreg(i32) #0 {
 ; GFX10-NEXT:    s_mov_b32 s7, 6
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v3i32_i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v3i32_i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8238,8 +8242,8 @@ define amdgpu_gfx void @test_call_external_void_func_v3i32_i32_inreg(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 5
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8268,17 +8272,17 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s6, 2
 ; GFX9-NEXT:    v_writelane_b32 v40, s7, 3
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 4
-; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
+; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8306,15 +8310,15 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s6, 2
 ; GFX10-NEXT:    v_writelane_b32 v40, s7, 3
+; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 4
-; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[30:31], 0x0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8350,8 +8354,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 5
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8389,12 +8393,12 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s6, 3
 ; GFX9-NEXT:    s_mov_b32 s7, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8418,6 +8422,9 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 6
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v4i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v4i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -8428,12 +8435,9 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s7, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 5
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v4i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v4i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8472,8 +8476,8 @@ define amdgpu_gfx void @test_call_external_void_func_v4i32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 4
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 5
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 5
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s5, v40, 1
@@ -8512,12 +8516,12 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s7, 4
 ; GFX9-NEXT:    s_mov_b32 s8, 5
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 6
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v5i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v5i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 5
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v5i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v5i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 6
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX9-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX9-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX9-NEXT:    v_readlane_b32 s6, v40, 2
@@ -8542,6 +8546,9 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 7
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v5i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v5i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -8554,12 +8561,9 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s8, 5
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 5
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 6
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v5i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v5i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 5
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 6
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX10-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-NEXT:    v_readlane_b32 s6, v40, 2
@@ -8601,8 +8605,8 @@ define amdgpu_gfx void @test_call_external_void_func_v5i32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 5
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 6
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 6
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 5
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s8, v40, 4
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s7, v40, 3
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s6, v40, 2
@@ -8631,24 +8635,24 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s5, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s6, 2
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s7, 3
 ; GFX9-NEXT:    v_writelane_b32 v40, s8, 4
 ; GFX9-NEXT:    v_writelane_b32 v40, s9, 5
 ; GFX9-NEXT:    v_writelane_b32 v40, s10, 6
 ; GFX9-NEXT:    v_writelane_b32 v40, s11, 7
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 8
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 9
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx8 s[4:11], s[30:31], 0x0
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 8
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 8
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 9
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX9-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX9-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX9-NEXT:    v_readlane_b32 s9, v40, 5
@@ -8674,6 +8678,7 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 10
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
@@ -8684,17 +8689,16 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s9, 5
 ; GFX10-NEXT:    v_writelane_b32 v40, s10, 6
 ; GFX10-NEXT:    v_writelane_b32 v40, s11, 7
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_load_dwordx8 s[4:11], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 9
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
-; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_load_dwordx8 s[4:11], s[30:31], 0x0
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 8
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX10-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX10-NEXT:    v_readlane_b32 s9, v40, 5
@@ -8740,8 +8744,8 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 9
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s9, v40, 5
@@ -8792,12 +8796,12 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s10, 7
 ; GFX9-NEXT:    s_mov_b32 s11, 8
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 9
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 8
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX9-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX9-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX9-NEXT:    v_readlane_b32 s9, v40, 5
@@ -8825,6 +8829,9 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 10
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v8i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v8i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
 ; GFX10-NEXT:    s_mov_b32 s4, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s5, 1
@@ -8843,12 +8850,9 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm_inreg() #0 {
 ; GFX10-NEXT:    s_mov_b32 s11, 8
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 9
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v8i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v8i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 8
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX10-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX10-NEXT:    v_readlane_b32 s9, v40, 5
@@ -8899,8 +8903,8 @@ define amdgpu_gfx void @test_call_external_void_func_v8i32_imm_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 9
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 9
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 8
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s11, v40, 7
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s10, v40, 6
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s9, v40, 5
@@ -8940,24 +8944,24 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s12, 8
 ; GFX9-NEXT:    v_writelane_b32 v40, s13, 9
 ; GFX9-NEXT:    v_writelane_b32 v40, s14, 10
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s15, 11
 ; GFX9-NEXT:    v_writelane_b32 v40, s16, 12
 ; GFX9-NEXT:    v_writelane_b32 v40, s17, 13
 ; GFX9-NEXT:    v_writelane_b32 v40, s18, 14
 ; GFX9-NEXT:    v_writelane_b32 v40, s19, 15
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 16
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 17
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_load_dwordx16 s[4:19], s[34:35], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx16 s[4:19], s[30:31], 0x0
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v16i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v16i32_inreg@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 16
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 16
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 17
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v16i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v16i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 17
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 16
 ; GFX9-NEXT:    v_readlane_b32 s19, v40, 15
 ; GFX9-NEXT:    v_readlane_b32 s18, v40, 14
 ; GFX9-NEXT:    v_readlane_b32 s17, v40, 13
@@ -8991,6 +8995,7 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 18
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
@@ -9009,17 +9014,16 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s17, 13
 ; GFX10-NEXT:    v_writelane_b32 v40, s18, 14
 ; GFX10-NEXT:    v_writelane_b32 v40, s19, 15
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_load_dwordx16 s[4:19], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v16i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v16i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 16
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 17
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
-; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_load_dwordx16 s[4:19], s[30:31], 0x0
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v16i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v16i32_inreg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 16
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 17
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 16
 ; GFX10-NEXT:    v_readlane_b32 s19, v40, 15
 ; GFX10-NEXT:    v_readlane_b32 s18, v40, 14
 ; GFX10-NEXT:    v_readlane_b32 s17, v40, 13
@@ -9081,8 +9085,8 @@ define amdgpu_gfx void @test_call_external_void_func_v16i32_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 16
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 17
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 17
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s19, v40, 15
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s18, v40, 14
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s17, v40, 13
@@ -9137,29 +9141,24 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s17, 13
 ; GFX9-NEXT:    v_writelane_b32 v40, s18, 14
 ; GFX9-NEXT:    v_writelane_b32 v40, s19, 15
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s20, 16
 ; GFX9-NEXT:    v_writelane_b32 v40, s21, 17
 ; GFX9-NEXT:    v_writelane_b32 v40, s22, 18
 ; GFX9-NEXT:    v_writelane_b32 v40, s23, 19
 ; GFX9-NEXT:    v_writelane_b32 v40, s24, 20
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_load_dwordx16 s[36:51], s[34:35], 0x40
+; GFX9-NEXT:    s_load_dwordx16 s[4:19], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s25, 21
 ; GFX9-NEXT:    v_writelane_b32 v40, s26, 22
 ; GFX9-NEXT:    v_writelane_b32 v40, s27, 23
-; GFX9-NEXT:    v_writelane_b32 v40, s28, 24
-; GFX9-NEXT:    v_writelane_b32 v40, s29, 25
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 26
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 27
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dwordx16 s[36:51], s[30:31], 0x40
-; GFX9-NEXT:    s_load_dwordx16 s[4:19], s[30:31], 0x0
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32_inreg@rel32@hi+12
+; GFX9-NEXT:    v_writelane_b32 v40, s28, 24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s46
+; GFX9-NEXT:    v_writelane_b32 v40, s29, 25
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s47
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s48
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s49
@@ -9168,6 +9167,7 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX9-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8
 ; GFX9-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s50
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 26
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:16
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s51
 ; GFX9-NEXT:    s_mov_b32 s20, s36
@@ -9180,10 +9180,14 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX9-NEXT:    s_mov_b32 s27, s43
 ; GFX9-NEXT:    s_mov_b32 s28, s44
 ; GFX9-NEXT:    s_mov_b32 s29, s45
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 27
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:20
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 26
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 27
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX9-NEXT:    v_readlane_b32 s29, v40, 25
 ; GFX9-NEXT:    v_readlane_b32 s28, v40, 24
 ; GFX9-NEXT:    v_readlane_b32 s27, v40, 23
@@ -9227,6 +9231,7 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 28
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
@@ -9245,41 +9250,30 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s17, 13
 ; GFX10-NEXT:    v_writelane_b32 v40, s18, 14
 ; GFX10-NEXT:    v_writelane_b32 v40, s19, 15
+; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX10-NEXT:    s_clause 0x1
+; GFX10-NEXT:    s_load_dwordx16 s[36:51], s[34:35], 0x40
+; GFX10-NEXT:    s_load_dwordx16 s[4:19], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s20, 16
 ; GFX10-NEXT:    v_writelane_b32 v40, s21, 17
 ; GFX10-NEXT:    v_writelane_b32 v40, s22, 18
-; GFX10-NEXT:    v_writelane_b32 v40, s23, 19
-; GFX10-NEXT:    v_writelane_b32 v40, s24, 20
-; GFX10-NEXT:    v_writelane_b32 v40, s25, 21
-; GFX10-NEXT:    v_writelane_b32 v40, s26, 22
-; GFX10-NEXT:    v_writelane_b32 v40, s27, 23
-; GFX10-NEXT:    v_writelane_b32 v40, s28, 24
-; GFX10-NEXT:    v_writelane_b32 v40, s29, 25
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 26
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 27
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
-; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    s_load_dwordx16 s[36:51], s[30:31], 0x40
-; GFX10-NEXT:    s_load_dwordx16 s[4:19], s[30:31], 0x0
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32_inreg@rel32@hi+12
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s46
+; GFX10-NEXT:    v_writelane_b32 v40, s23, 19
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s47
 ; GFX10-NEXT:    v_mov_b32_e32 v2, s48
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s49
 ; GFX10-NEXT:    s_mov_b32 s20, s36
+; GFX10-NEXT:    v_writelane_b32 v40, s24, 20
 ; GFX10-NEXT:    s_mov_b32 s21, s37
 ; GFX10-NEXT:    s_mov_b32 s22, s38
 ; GFX10-NEXT:    s_mov_b32 s23, s39
 ; GFX10-NEXT:    s_mov_b32 s24, s40
+; GFX10-NEXT:    v_writelane_b32 v40, s25, 21
 ; GFX10-NEXT:    s_mov_b32 s25, s41
-; GFX10-NEXT:    s_mov_b32 s26, s42
-; GFX10-NEXT:    s_mov_b32 s27, s43
-; GFX10-NEXT:    s_mov_b32 s28, s44
-; GFX10-NEXT:    s_mov_b32 s29, s45
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s50
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s51
 ; GFX10-NEXT:    buffer_store_dword v0, off, s[0:3], s32
@@ -9288,9 +9282,19 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX10-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12
 ; GFX10-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:16
 ; GFX10-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:20
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 26
+; GFX10-NEXT:    v_writelane_b32 v40, s26, 22
+; GFX10-NEXT:    s_mov_b32 s26, s42
+; GFX10-NEXT:    v_writelane_b32 v40, s27, 23
+; GFX10-NEXT:    s_mov_b32 s27, s43
+; GFX10-NEXT:    v_writelane_b32 v40, s28, 24
+; GFX10-NEXT:    s_mov_b32 s28, s44
+; GFX10-NEXT:    v_writelane_b32 v40, s29, 25
+; GFX10-NEXT:    s_mov_b32 s29, s45
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 26
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 27
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 27
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX10-NEXT:    v_readlane_b32 s29, v40, 25
 ; GFX10-NEXT:    v_readlane_b32 s28, v40, 24
 ; GFX10-NEXT:    v_readlane_b32 s27, v40, 23
@@ -9393,8 +9397,8 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_inreg() #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 26
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 27
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 27
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s29, v40, 25
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s28, v40, 24
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s27, v40, 23
@@ -9458,43 +9462,39 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32_inreg(i32) #0 {
 ; GFX9-NEXT:    v_writelane_b32 v40, s16, 12
 ; GFX9-NEXT:    v_writelane_b32 v40, s17, 13
 ; GFX9-NEXT:    v_writelane_b32 v40, s18, 14
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s19, 15
 ; GFX9-NEXT:    v_writelane_b32 v40, s20, 16
 ; GFX9-NEXT:    v_writelane_b32 v40, s21, 17
 ; GFX9-NEXT:    v_writelane_b32 v40, s22, 18
 ; GFX9-NEXT:    v_writelane_b32 v40, s23, 19
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_load_dword s52, s[34:35], 0x0
+; GFX9-NEXT:    ; kill: killed $sgpr34_sgpr35
+; GFX9-NEXT:    ; kill: killed $sgpr34_sgpr35
+; GFX9-NEXT:    s_load_dwordx16 s[36:51], s[34:35], 0x40
+; GFX9-NEXT:    s_load_dwordx16 s[4:19], s[34:35], 0x0
 ; GFX9-NEXT:    v_writelane_b32 v40, s24, 20
 ; GFX9-NEXT:    v_writelane_b32 v40, s25, 21
-; GFX9-NEXT:    v_writelane_b32 v40, s26, 22
-; GFX9-NEXT:    v_writelane_b32 v40, s27, 23
-; GFX9-NEXT:    v_writelane_b32 v40, s28, 24
-; GFX9-NEXT:    v_writelane_b32 v40, s29, 25
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 26
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 27
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s26, 22
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_load_dword s34, s[30:31], 0x0
-; GFX9-NEXT:    ; kill: killed $sgpr30_sgpr31
-; GFX9-NEXT:    ; kill: killed $sgpr30_sgpr31
-; GFX9-NEXT:    s_load_dwordx16 s[36:51], s[30:31], 0x40
-; GFX9-NEXT:    s_load_dwordx16 s[4:19], s[30:31], 0x0
-; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s34
+; GFX9-NEXT:    v_mov_b32_e32 v0, s52
+; GFX9-NEXT:    v_writelane_b32 v40, s27, 23
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:24
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32_i32_inreg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32_i32_inreg@rel32@hi+12
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s46
+; GFX9-NEXT:    v_writelane_b32 v40, s28, 24
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s47
+; GFX9-NEXT:    v_mov_b32_e32 v2, s48
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32
 ; GFX9-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4
-; GFX9-NEXT:    v_mov_b32_e32 v0, s48
-; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:8
+; GFX9-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s49
+; GFX9-NEXT:    v_writelane_b32 v40, s29, 25
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:12
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s50
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 26
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:16
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s51
 ; GFX9-NEXT:    s_mov_b32 s20, s36
@@ -9507,10 +9507,14 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32_inreg(i32) #0 {
 ; GFX9-NEXT:    s_mov_b32 s27, s43
 ; GFX9-NEXT:    s_mov_b32 s28, s44
 ; GFX9-NEXT:    s_mov_b32 s29, s45
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 27
 ; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:20
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 26
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32_i32_inreg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32_i32_inreg@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 27
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX9-NEXT:    v_readlane_b32 s29, v40, 25
 ; GFX9-NEXT:    v_readlane_b32 s28, v40, 24
 ; GFX9-NEXT:    v_readlane_b32 s27, v40, 23
@@ -9554,6 +9558,7 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32_inreg(i32) #0 {
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_mov_b32 exec_lo, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 28
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s4, 0
@@ -9572,57 +9577,56 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32_inreg(i32) #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s17, 13
 ; GFX10-NEXT:    v_writelane_b32 v40, s18, 14
 ; GFX10-NEXT:    v_writelane_b32 v40, s19, 15
-; GFX10-NEXT:    v_writelane_b32 v40, s20, 16
-; GFX10-NEXT:    v_writelane_b32 v40, s21, 17
-; GFX10-NEXT:    v_writelane_b32 v40, s22, 18
-; GFX10-NEXT:    v_writelane_b32 v40, s23, 19
-; GFX10-NEXT:    v_writelane_b32 v40, s24, 20
-; GFX10-NEXT:    v_writelane_b32 v40, s25, 21
-; GFX10-NEXT:    v_writelane_b32 v40, s26, 22
-; GFX10-NEXT:    v_writelane_b32 v40, s27, 23
-; GFX10-NEXT:    v_writelane_b32 v40, s28, 24
-; GFX10-NEXT:    v_writelane_b32 v40, s29, 25
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 26
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 27
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x2
-; GFX10-NEXT:    s_load_dword s34, s[30:31], 0x0
+; GFX10-NEXT:    s_load_dword s52, s[34:35], 0x0
 ; GFX10-NEXT:    ; meta instruction
 ; GFX10-NEXT:    ; meta instruction
-; GFX10-NEXT:    s_load_dwordx16 s[36:51], s[30:31], 0x40
-; GFX10-NEXT:    s_load_dwordx16 s[4:19], s[30:31], 0x0
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_v32i32_i32_inreg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_v32i32_i32_inreg@rel32@hi+12
+; GFX10-NEXT:    s_load_dwordx16 s[36:51], s[34:35], 0x40
+; GFX10-NEXT:    s_load_dwordx16 s[4:19], s[34:35], 0x0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_v32i32_i32_inreg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_v32i32_i32_inreg@rel32@hi+12
+; GFX10-NEXT:    v_writelane_b32 v40, s20, 16
+; GFX10-NEXT:    v_writelane_b32 v40, s21, 17
+; GFX10-NEXT:    v_writelane_b32 v40, s22, 18
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    v_mov_b32_e32 v0, s34
+; GFX10-NEXT:    v_mov_b32_e32 v0, s52
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s47
-; GFX10-NEXT:    v_mov_b32_e32 v2, s48
+; GFX10-NEXT:    v_writelane_b32 v40, s23, 19
 ; GFX10-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:24
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s46
+; GFX10-NEXT:    v_mov_b32_e32 v2, s48
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s49
+; GFX10-NEXT:    v_writelane_b32 v40, s24, 20
 ; GFX10-NEXT:    s_mov_b32 s20, s36
 ; GFX10-NEXT:    s_mov_b32 s21, s37
 ; GFX10-NEXT:    s_mov_b32 s22, s38
 ; GFX10-NEXT:    s_mov_b32 s23, s39
+; GFX10-NEXT:    v_writelane_b32 v40, s25, 21
 ; GFX10-NEXT:    s_mov_b32 s24, s40
 ; GFX10-NEXT:    s_mov_b32 s25, s41
-; GFX10-NEXT:    s_mov_b32 s26, s42
-; GFX10-NEXT:    s_mov_b32 s27, s43
-; GFX10-NEXT:    s_mov_b32 s28, s44
-; GFX10-NEXT:    s_mov_b32 s29, s45
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s50
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s51
+; GFX10-NEXT:    v_writelane_b32 v40, s26, 22
+; GFX10-NEXT:    s_mov_b32 s26, s42
 ; GFX10-NEXT:    buffer_store_dword v0, off, s[0:3], s32
 ; GFX10-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4
 ; GFX10-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:8
 ; GFX10-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12
 ; GFX10-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:16
 ; GFX10-NEXT:    buffer_store_dword v5, off, s[0:3], s32 offset:20
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 26
+; GFX10-NEXT:    v_writelane_b32 v40, s27, 23
+; GFX10-NEXT:    s_mov_b32 s27, s43
+; GFX10-NEXT:    v_writelane_b32 v40, s28, 24
+; GFX10-NEXT:    s_mov_b32 s28, s44
+; GFX10-NEXT:    v_writelane_b32 v40, s29, 25
+; GFX10-NEXT:    s_mov_b32 s29, s45
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 26
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 27
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 27
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX10-NEXT:    v_readlane_b32 s29, v40, 25
 ; GFX10-NEXT:    v_readlane_b32 s28, v40, 24
 ; GFX10-NEXT:    v_readlane_b32 s27, v40, 23
@@ -9730,8 +9734,8 @@ define amdgpu_gfx void @test_call_external_void_func_v32i32_i32_inreg(i32) #0 {
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s30, 26
 ; GFX10-SCRATCH-NEXT:    v_writelane_b32 v40, s31, 27
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 27
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 26
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s29, v40, 25
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s28, v40, 24
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s27, v40, 23
@@ -9787,16 +9791,16 @@ define amdgpu_gfx void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val, d
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, stack_passed_f64_arg@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, stack_passed_f64_arg@rel32@hi+12
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, stack_passed_f64_arg@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, stack_passed_f64_arg@rel32@hi+12
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    buffer_store_dword v32, off, s[0:3], s32
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    buffer_store_dword v33, off, s[0:3], s32 offset:4
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -9820,17 +9824,17 @@ define amdgpu_gfx void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val, d
 ; GFX10-NEXT:    buffer_load_dword v33, off, s[0:3], s33 offset:4
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, stack_passed_f64_arg@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, stack_passed_f64_arg@rel32@hi+12
 ; GFX10-NEXT:    s_waitcnt vmcnt(1)
 ; GFX10-NEXT:    buffer_store_dword v32, off, s[0:3], s32
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    buffer_store_dword v33, off, s[0:3], s32 offset:4
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, stack_passed_f64_arg@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, stack_passed_f64_arg@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -9860,8 +9864,8 @@ define amdgpu_gfx void @stack_passed_arg_alignment_v32i32_f64(<32 x i32> %val, d
 ; GFX10-SCRATCH-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-SCRATCH-NEXT:    scratch_store_dwordx2 off, v[32:33], s32
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -9927,12 +9931,12 @@ define amdgpu_gfx void @stack_12xv3i32() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v30, 10
 ; GFX9-NEXT:    v_mov_b32_e32 v31, 11
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_12xv3i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_12xv3i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_12xv3i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_12xv3i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -9994,12 +9998,12 @@ define amdgpu_gfx void @stack_12xv3i32() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v30, 10
 ; GFX10-NEXT:    v_mov_b32_e32 v31, 11
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_12xv3i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_12xv3i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_12xv3i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_12xv3i32@rel32@hi+12
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -10063,8 +10067,8 @@ define amdgpu_gfx void @stack_12xv3i32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_add_u32 s0, s0, external_void_func_12xv3i32@rel32@lo+4
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_12xv3i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -10150,12 +10154,12 @@ define amdgpu_gfx void @stack_8xv5i32() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v30, 6
 ; GFX9-NEXT:    v_mov_b32_e32 v31, 7
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_8xv5i32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_8xv5i32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_8xv5i32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_8xv5i32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -10225,12 +10229,12 @@ define amdgpu_gfx void @stack_8xv5i32() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v30, 6
 ; GFX10-NEXT:    v_mov_b32_e32 v31, 7
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_8xv5i32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_8xv5i32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_8xv5i32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_8xv5i32@rel32@hi+12
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -10299,8 +10303,8 @@ define amdgpu_gfx void @stack_8xv5i32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_add_u32 s0, s0, external_void_func_8xv5i32@rel32@lo+4
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_8xv5i32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1
@@ -10382,12 +10386,12 @@ define amdgpu_gfx void @stack_8xv5f32() #0 {
 ; GFX9-NEXT:    v_mov_b32_e32 v30, 0x40c00000
 ; GFX9-NEXT:    v_mov_b32_e32 v31, 0x40e00000
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_8xv5f32@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_8xv5f32@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_8xv5f32@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_8xv5f32@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -10457,12 +10461,12 @@ define amdgpu_gfx void @stack_8xv5f32() #0 {
 ; GFX10-NEXT:    v_mov_b32_e32 v30, 0x40c00000
 ; GFX10-NEXT:    v_mov_b32_e32 v31, 0x40e00000
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_8xv5f32@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_8xv5f32@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_8xv5f32@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_8xv5f32@rel32@hi+12
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -10531,8 +10535,8 @@ define amdgpu_gfx void @stack_8xv5f32() #0 {
 ; GFX10-SCRATCH-NEXT:    s_add_u32 s0, s0, external_void_func_8xv5f32@rel32@lo+4
 ; GFX10-SCRATCH-NEXT:    s_addc_u32 s1, s1, external_void_func_8xv5f32@rel32@hi+12
 ; GFX10-SCRATCH-NEXT:    s_swappc_b64 s[30:31], s[0:1]
-; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-SCRATCH-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-SCRATCH-NEXT:    s_add_i32 s32, s32, -16
 ; GFX10-SCRATCH-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-SCRATCH-NEXT:    s_or_saveexec_b32 s0, -1

diff  --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll
index fa8f51cf412fa..26bc62d0a99b1 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-preserved-registers.ll
@@ -25,8 +25,8 @@ define amdgpu_gfx void @test_call_external_void_func_void_clobber_s30_s31_call_e
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
@@ -59,8 +59,8 @@ define amdgpu_gfx void @test_call_external_void_func_void_clobber_s30_s31_call_e
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 2
 ; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
@@ -77,26 +77,54 @@ define amdgpu_gfx void @test_call_external_void_func_void_clobber_s30_s31_call_e
   ret void
 }
 
-define amdgpu_gfx void @void_func_void_clobber_s30_s31() #1 {
-; GFX9-LABEL: void_func_void_clobber_s30_s31:
+define amdgpu_gfx void @void_func_void_clobber_s28_s29() #1 {
+; GFX9-LABEL: void_func_void_clobber_s28_s29:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    s_mov_b64 s[36:37], s[30:31]
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    v_writelane_b32 v0, s28, 0
+; GFX9-NEXT:    v_writelane_b32 v0, s29, 1
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; clobber
 ; GFX9-NEXT:    ;;#ASMEND
-; GFX9-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-NEXT:    v_readlane_b32 s29, v0, 1
+; GFX9-NEXT:    v_readlane_b32 s28, v0, 0
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: void_func_void_clobber_s30_s31:
+; GFX10-LABEL: void_func_void_clobber_s28_s29:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_mov_b64 s[36:37], s[30:31]
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    v_writelane_b32 v0, s28, 0
+; GFX10-NEXT:    v_writelane_b32 v0, s29, 1
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; clobber
 ; GFX10-NEXT:    ;;#ASMEND
-; GFX10-NEXT:    s_setpc_b64 s[36:37]
-  call void asm sideeffect "; clobber", "~{s[30:31]}"() #0
+; GFX10-NEXT:    v_readlane_b32 s29, v0, 1
+; GFX10-NEXT:    v_readlane_b32 s28, v0, 0
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+; GCN:          v_writelane_b32 v0, s28, 0
+; GCN:          v_writelane_b32 v0, s29, 1
+
+; GCN:          v_readlane_b32 s28, v0, 0
+; GCN:          v_readlane_b32 s29, v0, 1
+  call void asm sideeffect "; clobber", "~{s[28:29]}"() #0
   ret void
 }
 
@@ -109,24 +137,24 @@ define amdgpu_gfx void @test_call_void_func_void_mayclobber_s31(i32 addrspace(1)
 ; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX9-NEXT:    v_writelane_b32 v40, s4, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; def s31
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    s_mov_b32 s4, s31
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    s_mov_b32 s31, s4
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use s31
 ; GFX9-NEXT:    ;;#ASMEND
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -162,8 +190,8 @@ define amdgpu_gfx void @test_call_void_func_void_mayclobber_s31(i32 addrspace(1)
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use s31
 ; GFX10-NEXT:    ;;#ASMEND
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -196,17 +224,17 @@ define amdgpu_gfx void @test_call_void_func_void_mayclobber_v31(i32 addrspace(1)
 ; GFX9-NEXT:    ; def v31
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    v_mov_b32_e32 v41, v31
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_mov_b32_e32 v31, v41
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use v31
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    buffer_load_dword v41, off, s[0:3], s33 ; 4-byte Folded Reload
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -232,18 +260,18 @@ define amdgpu_gfx void @test_call_void_func_void_mayclobber_v31(i32 addrspace(1)
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v41, v31
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_mov_b32_e32 v31, v41
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use v31
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    buffer_load_dword v41, off, s[0:3], s33 ; 4-byte Folded Reload
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -276,16 +304,16 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s33(i32 addrspace(1)*
 ; GFX9-NEXT:    ; def s33
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    s_mov_b32 s4, s33
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    s_mov_b32 s33, s4
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use s33
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -306,6 +334,9 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s33(i32 addrspace(1)*
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; def s33
 ; GFX10-NEXT:    ;;#ASMEND
@@ -313,16 +344,13 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s33(i32 addrspace(1)*
 ; GFX10-NEXT:    s_mov_b32 s4, s33
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    s_mov_b32 s33, s4
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use s33
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -350,21 +378,21 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s34(i32 addrspace(1)*
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 1
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; def s34
 ; GFX9-NEXT:    ;;#ASMEND
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 2
 ; GFX9-NEXT:    s_mov_b32 s4, s34
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    s_mov_b32 s34, s4
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use s34
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -385,6 +413,9 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s34(i32 addrspace(1)*
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[36:37]
+; GFX10-NEXT:    s_add_u32 s36, s36, external_void_func_void@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s37, s37, external_void_func_void@rel32@hi+12
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; def s34
 ; GFX10-NEXT:    ;;#ASMEND
@@ -392,16 +423,13 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_s34(i32 addrspace(1)*
 ; GFX10-NEXT:    s_mov_b32 s4, s34
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[36:37]
 ; GFX10-NEXT:    s_mov_b32 s34, s4
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use s34
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -433,16 +461,16 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_v40(i32 addrspace(1)*
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; def v40
 ; GFX9-NEXT:    ;;#ASMEND
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use v40
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
-; GFX9-NEXT:    v_readlane_b32 s30, v41, 0
 ; GFX9-NEXT:    v_readlane_b32 s31, v41, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v41, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v41, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -467,17 +495,17 @@ define amdgpu_gfx void @test_call_void_func_void_preserves_v40(i32 addrspace(1)*
 ; GFX10-NEXT:    ; def v40
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    v_writelane_b32 v41, s30, 0
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v41, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use v40
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
-; GFX10-NEXT:    v_readlane_b32 s30, v41, 0
 ; GFX10-NEXT:    v_readlane_b32 s31, v41, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v41, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v41, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -588,12 +616,12 @@ define amdgpu_gfx void @test_call_void_func_void_clobber_s33() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, void_func_void_clobber_s33@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, void_func_void_clobber_s33@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, void_func_void_clobber_s33@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, void_func_void_clobber_s33@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -613,14 +641,14 @@ define amdgpu_gfx void @test_call_void_func_void_clobber_s33() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, void_func_void_clobber_s33@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, void_func_void_clobber_s33@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, void_func_void_clobber_s33@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, void_func_void_clobber_s33@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -645,12 +673,12 @@ define amdgpu_gfx void @test_call_void_func_void_clobber_s34() #0 {
 ; GFX9-NEXT:    s_addk_i32 s32, 0x400
 ; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, void_func_void_clobber_s34@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, void_func_void_clobber_s34@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, void_func_void_clobber_s34@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, void_func_void_clobber_s34@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -670,14 +698,14 @@ define amdgpu_gfx void @test_call_void_func_void_clobber_s34() #0 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 2
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, void_func_void_clobber_s34@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, void_func_void_clobber_s34@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, void_func_void_clobber_s34@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, void_func_void_clobber_s34@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
 ; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
@@ -707,15 +735,15 @@ define amdgpu_gfx void @callee_saved_sgpr_kernel() #1 {
 ; GFX9-NEXT:    ; def s40
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    s_mov_b32 s4, s40
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use s4
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -736,6 +764,9 @@ define amdgpu_gfx void @callee_saved_sgpr_kernel() #1 {
 ; GFX10-NEXT:    v_writelane_b32 v40, s33, 3
 ; GFX10-NEXT:    s_mov_b32 s33, s32
 ; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; def s40
 ; GFX10-NEXT:    ;;#ASMEND
@@ -743,15 +774,12 @@ define amdgpu_gfx void @callee_saved_sgpr_kernel() #1 {
 ; GFX10-NEXT:    s_mov_b32 s4, s40
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use s4
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3
@@ -789,10 +817,10 @@ define amdgpu_gfx void @callee_saved_sgpr_vgpr_kernel() #1 {
 ; GFX9-NEXT:    ; def v32
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    v_mov_b32_e32 v41, v32
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-NEXT:    ;;#ASMSTART
 ; GFX9-NEXT:    ; use s4
 ; GFX9-NEXT:    ;;#ASMEND
@@ -800,8 +828,8 @@ define amdgpu_gfx void @callee_saved_sgpr_vgpr_kernel() #1 {
 ; GFX9-NEXT:    ; use v41
 ; GFX9-NEXT:    ;;#ASMEND
 ; GFX9-NEXT:    buffer_load_dword v41, off, s[0:3], s33 ; 4-byte Folded Reload
-; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 3
@@ -832,12 +860,12 @@ define amdgpu_gfx void @callee_saved_sgpr_vgpr_kernel() #1 {
 ; GFX10-NEXT:    ; def v32
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    v_mov_b32_e32 v41, v32
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, external_void_func_void@rel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, external_void_func_void@rel32@hi+12
 ; GFX10-NEXT:    v_writelane_b32 v40, s30, 1
 ; GFX10-NEXT:    v_writelane_b32 v40, s31, 2
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, external_void_func_void@rel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, external_void_func_void@rel32@hi+12
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX10-NEXT:    ;;#ASMSTART
 ; GFX10-NEXT:    ; use s4
 ; GFX10-NEXT:    ;;#ASMEND
@@ -845,8 +873,8 @@ define amdgpu_gfx void @callee_saved_sgpr_vgpr_kernel() #1 {
 ; GFX10-NEXT:    ; use v41
 ; GFX10-NEXT:    ;;#ASMEND
 ; GFX10-NEXT:    buffer_load_dword v41, off, s[0:3], s33 ; 4-byte Folded Reload
-; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s31, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 1
 ; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 3

diff  --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
index 1b84b0a45458b..1f22431168127 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-return-types.ll
@@ -23,27 +23,59 @@ define amdgpu_gfx void @call_i1() #0 {
 ; GFX9-LABEL: call_i1:
 ; GFX9:       ; %bb.0: ; %entry
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, return_i1@gotpcrel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, return_i1@gotpcrel32@hi+12
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    v_writelane_b32 v1, s33, 2
+; GFX9-NEXT:    s_mov_b32 s33, s32
+; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, return_i1@gotpcrel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, return_i1@gotpcrel32@hi+12
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX9-NEXT:    v_writelane_b32 v1, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v1, s31, 1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX9-NEXT:    v_readlane_b32 s31, v1, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v1, 0
+; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
+; GFX9-NEXT:    v_readlane_b32 s33, v1, 2
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: call_i1:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, return_i1@gotpcrel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, return_i1@gotpcrel32@hi+12
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    v_writelane_b32 v1, s33, 2
+; GFX10-NEXT:    s_mov_b32 s33, s32
+; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, return_i1@gotpcrel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, return_i1@gotpcrel32@hi+12
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX10-NEXT:    v_writelane_b32 v1, s30, 0
+; GFX10-NEXT:    v_writelane_b32 v1, s31, 1
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    s_setpc_b64 s[36:37]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX10-NEXT:    v_readlane_b32 s31, v1, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v1, 0
+; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
+; GFX10-NEXT:    v_readlane_b32 s33, v1, 2
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
 entry:
   call amdgpu_gfx i1 @return_i1()
   ret void
@@ -70,27 +102,59 @@ define amdgpu_gfx void @call_i16() #0 {
 ; GFX9-LABEL: call_i16:
 ; GFX9:       ; %bb.0: ; %entry
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, return_i16@gotpcrel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, return_i16@gotpcrel32@hi+12
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    v_writelane_b32 v1, s33, 2
+; GFX9-NEXT:    s_mov_b32 s33, s32
+; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, return_i16@gotpcrel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, return_i16@gotpcrel32@hi+12
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX9-NEXT:    v_writelane_b32 v1, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v1, s31, 1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX9-NEXT:    v_readlane_b32 s31, v1, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v1, 0
+; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
+; GFX9-NEXT:    v_readlane_b32 s33, v1, 2
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: call_i16:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, return_i16@gotpcrel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, return_i16@gotpcrel32@hi+12
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    v_writelane_b32 v1, s33, 2
+; GFX10-NEXT:    s_mov_b32 s33, s32
+; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, return_i16@gotpcrel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, return_i16@gotpcrel32@hi+12
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX10-NEXT:    v_writelane_b32 v1, s30, 0
+; GFX10-NEXT:    v_writelane_b32 v1, s31, 1
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    s_setpc_b64 s[36:37]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX10-NEXT:    v_readlane_b32 s31, v1, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v1, 0
+; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
+; GFX10-NEXT:    v_readlane_b32 s33, v1, 2
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
 entry:
   call amdgpu_gfx i16 @return_i16()
   ret void
@@ -117,27 +181,59 @@ define amdgpu_gfx void @call_2xi16() #0 {
 ; GFX9-LABEL: call_2xi16:
 ; GFX9:       ; %bb.0: ; %entry
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, return_2xi16@gotpcrel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, return_2xi16@gotpcrel32@hi+12
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    v_writelane_b32 v1, s33, 2
+; GFX9-NEXT:    s_mov_b32 s33, s32
+; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, return_2xi16@gotpcrel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, return_2xi16@gotpcrel32@hi+12
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX9-NEXT:    v_writelane_b32 v1, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v1, s31, 1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX9-NEXT:    v_readlane_b32 s31, v1, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v1, 0
+; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
+; GFX9-NEXT:    v_readlane_b32 s33, v1, 2
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: call_2xi16:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, return_2xi16@gotpcrel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, return_2xi16@gotpcrel32@hi+12
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    v_writelane_b32 v1, s33, 2
+; GFX10-NEXT:    s_mov_b32 s33, s32
+; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, return_2xi16@gotpcrel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, return_2xi16@gotpcrel32@hi+12
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX10-NEXT:    v_writelane_b32 v1, s30, 0
+; GFX10-NEXT:    v_writelane_b32 v1, s31, 1
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    s_setpc_b64 s[36:37]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX10-NEXT:    v_readlane_b32 s31, v1, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v1, 0
+; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
+; GFX10-NEXT:    v_readlane_b32 s33, v1, 2
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
 entry:
   call amdgpu_gfx <2 x i16> @return_2xi16()
   ret void
@@ -166,27 +262,59 @@ define amdgpu_gfx void @call_3xi16() #0 {
 ; GFX9-LABEL: call_3xi16:
 ; GFX9:       ; %bb.0: ; %entry
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, return_3xi16@gotpcrel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, return_3xi16@gotpcrel32@hi+12
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    v_writelane_b32 v2, s33, 2
+; GFX9-NEXT:    s_mov_b32 s33, s32
+; GFX9-NEXT:    s_addk_i32 s32, 0x400
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, return_3xi16@gotpcrel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, return_3xi16@gotpcrel32@hi+12
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX9-NEXT:    v_writelane_b32 v2, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v2, s31, 1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX9-NEXT:    v_readlane_b32 s31, v2, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v2, 0
+; GFX9-NEXT:    s_addk_i32 s32, 0xfc00
+; GFX9-NEXT:    v_readlane_b32 s33, v2, 2
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: call_3xi16:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, return_3xi16@gotpcrel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, return_3xi16@gotpcrel32@hi+12
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    v_writelane_b32 v2, s33, 2
+; GFX10-NEXT:    s_mov_b32 s33, s32
+; GFX10-NEXT:    s_addk_i32 s32, 0x200
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, return_3xi16@gotpcrel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, return_3xi16@gotpcrel32@hi+12
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX10-NEXT:    v_writelane_b32 v2, s30, 0
+; GFX10-NEXT:    v_writelane_b32 v2, s31, 1
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX10-NEXT:    s_setpc_b64 s[36:37]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX10-NEXT:    v_readlane_b32 s31, v2, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v2, 0
+; GFX10-NEXT:    s_addk_i32 s32, 0xfe00
+; GFX10-NEXT:    v_readlane_b32 s33, v2, 2
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
 entry:
   call amdgpu_gfx <3 x i16> @return_3xi16()
   ret void
@@ -1241,41 +1369,63 @@ define amdgpu_gfx void @call_512xi32() #0 {
 ; GFX9-LABEL: call_512xi32:
 ; GFX9:       ; %bb.0: ; %entry
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    s_mov_b32 s34, s33
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:2048 ; 4-byte Folded Spill
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    v_writelane_b32 v2, s33, 2
 ; GFX9-NEXT:    s_add_i32 s33, s32, 0x1ffc0
 ; GFX9-NEXT:    s_and_b32 s33, s33, 0xfffe0000
 ; GFX9-NEXT:    s_add_i32 s32, s32, 0x60000
-; GFX9-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX9-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-NEXT:    s_add_u32 s30, s30, return_512xi32@gotpcrel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s31, s31, return_512xi32@gotpcrel32@hi+12
-; GFX9-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-NEXT:    s_add_u32 s34, s34, return_512xi32@gotpcrel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s35, s35, return_512xi32@gotpcrel32@hi+12
+; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
+; GFX9-NEXT:    v_writelane_b32 v2, s30, 0
 ; GFX9-NEXT:    v_lshrrev_b32_e64 v0, 6, s33
+; GFX9-NEXT:    v_writelane_b32 v2, s31, 1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX9-NEXT:    v_readlane_b32 s31, v2, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v2, 0
 ; GFX9-NEXT:    s_add_i32 s32, s32, 0xfffa0000
-; GFX9-NEXT:    s_mov_b32 s33, s34
-; GFX9-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-NEXT:    v_readlane_b32 s33, v2, 2
+; GFX9-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:2048 ; 4-byte Folded Reload
+; GFX9-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-NEXT:    s_waitcnt vmcnt(0)
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: call_512xi32:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    s_mov_b32 s34, s33
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:2048 ; 4-byte Folded Spill
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    v_writelane_b32 v2, s33, 2
 ; GFX10-NEXT:    s_add_i32 s33, s32, 0xffe0
 ; GFX10-NEXT:    s_add_i32 s32, s32, 0x30000
 ; GFX10-NEXT:    s_and_b32 s33, s33, 0xffff0000
-; GFX10-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX10-NEXT:    s_getpc_b64 s[30:31]
-; GFX10-NEXT:    s_add_u32 s30, s30, return_512xi32@gotpcrel32@lo+4
-; GFX10-NEXT:    s_addc_u32 s31, s31, return_512xi32@gotpcrel32@hi+12
+; GFX10-NEXT:    s_getpc_b64 s[34:35]
+; GFX10-NEXT:    s_add_u32 s34, s34, return_512xi32@gotpcrel32@lo+4
+; GFX10-NEXT:    s_addc_u32 s35, s35, return_512xi32@gotpcrel32@hi+12
+; GFX10-NEXT:    v_writelane_b32 v2, s30, 0
+; GFX10-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX10-NEXT:    v_lshrrev_b32_e64 v0, 5, s33
-; GFX10-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX10-NEXT:    v_writelane_b32 v2, s31, 1
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX10-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX10-NEXT:    v_readlane_b32 s31, v2, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v2, 0
 ; GFX10-NEXT:    s_add_i32 s32, s32, 0xfffd0000
-; GFX10-NEXT:    s_mov_b32 s33, s34
-; GFX10-NEXT:    s_setpc_b64 s[36:37]
+; GFX10-NEXT:    v_readlane_b32 s33, v2, 2
+; GFX10-NEXT:    s_or_saveexec_b32 s34, -1
+; GFX10-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:2048 ; 4-byte Folded Reload
+; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
+; GFX10-NEXT:    s_mov_b32 exec_lo, s34
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
 entry:
   call amdgpu_gfx <512 x i32> @return_512xi32()
   ret void

diff  --git a/llvm/test/CodeGen/AMDGPU/indirect-call.ll b/llvm/test/CodeGen/AMDGPU/indirect-call.ll
index b1edd78f4d4d0..4f3029b851464 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-call.ll
@@ -399,23 +399,23 @@ define void @test_indirect_call_vgpr_ptr(void()* %fptr) {
 ; GCN-NEXT:    v_writelane_b32 v40, s33, 17
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s46, 11
-; GCN-NEXT:    v_writelane_b32 v40, s47, 12
-; GCN-NEXT:    v_writelane_b32 v40, s48, 13
-; GCN-NEXT:    v_writelane_b32 v40, s49, 14
-; GCN-NEXT:    v_writelane_b32 v40, s30, 15
-; GCN-NEXT:    v_writelane_b32 v40, s31, 16
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s46, 13
+; GCN-NEXT:    v_writelane_b32 v40, s47, 14
+; GCN-NEXT:    v_writelane_b32 v40, s48, 15
+; GCN-NEXT:    v_writelane_b32 v40, s49, 16
 ; GCN-NEXT:    s_mov_b32 s42, s14
 ; GCN-NEXT:    s_mov_b32 s43, s13
 ; GCN-NEXT:    s_mov_b32 s44, s12
@@ -443,30 +443,30 @@ define void @test_indirect_call_vgpr_ptr(void()* %fptr) {
 ; GCN-NEXT:    s_cbranch_execnz .LBB2_1
 ; GCN-NEXT:  ; %bb.2:
 ; GCN-NEXT:    s_mov_b64 exec, s[46:47]
-; GCN-NEXT:    v_readlane_b32 s4, v40, 15
-; GCN-NEXT:    v_readlane_b32 s5, v40, 16
-; GCN-NEXT:    v_readlane_b32 s49, v40, 14
-; GCN-NEXT:    v_readlane_b32 s48, v40, 13
-; GCN-NEXT:    v_readlane_b32 s47, v40, 12
-; GCN-NEXT:    v_readlane_b32 s46, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    v_readlane_b32 s49, v40, 16
+; GCN-NEXT:    v_readlane_b32 s48, v40, 15
+; GCN-NEXT:    v_readlane_b32 s47, v40, 14
+; GCN-NEXT:    v_readlane_b32 s46, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 17
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: test_indirect_call_vgpr_ptr:
 ; GISEL:       ; %bb.0:
@@ -477,21 +477,23 @@ define void @test_indirect_call_vgpr_ptr(void()* %fptr) {
 ; GISEL-NEXT:    v_writelane_b32 v40, s33, 17
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 16
 ; GISEL-NEXT:    s_mov_b32 s42, s14
 ; GISEL-NEXT:    s_mov_b32 s43, s13
 ; GISEL-NEXT:    s_mov_b32 s44, s12
@@ -499,8 +501,6 @@ define void @test_indirect_call_vgpr_ptr(void()* %fptr) {
 ; GISEL-NEXT:    s_mov_b64 s[36:37], s[8:9]
 ; GISEL-NEXT:    s_mov_b64 s[38:39], s[6:7]
 ; GISEL-NEXT:    s_mov_b64 s[40:41], s[4:5]
-; GISEL-NEXT:    v_writelane_b32 v40, s30, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s31, 16
 ; GISEL-NEXT:    s_mov_b64 s[46:47], exec
 ; GISEL-NEXT:  .LBB2_1: ; =>This Inner Loop Header: Depth=1
 ; GISEL-NEXT:    v_readfirstlane_b32 s16, v0
@@ -521,30 +521,30 @@ define void @test_indirect_call_vgpr_ptr(void()* %fptr) {
 ; GISEL-NEXT:    s_cbranch_execnz .LBB2_1
 ; GISEL-NEXT:  ; %bb.2:
 ; GISEL-NEXT:    s_mov_b64 exec, s[46:47]
-; GISEL-NEXT:    v_readlane_b32 s4, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s5, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
 ; GISEL-NEXT:    v_readlane_b32 s33, v40, 17
-; GISEL-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[4:5]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   call void %fptr()
   ret void
 }
@@ -559,23 +559,23 @@ define void @test_indirect_call_vgpr_ptr_arg(void(i32)* %fptr) {
 ; GCN-NEXT:    v_writelane_b32 v40, s33, 17
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s46, 11
-; GCN-NEXT:    v_writelane_b32 v40, s47, 12
-; GCN-NEXT:    v_writelane_b32 v40, s48, 13
-; GCN-NEXT:    v_writelane_b32 v40, s49, 14
-; GCN-NEXT:    v_writelane_b32 v40, s30, 15
-; GCN-NEXT:    v_writelane_b32 v40, s31, 16
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s46, 13
+; GCN-NEXT:    v_writelane_b32 v40, s47, 14
+; GCN-NEXT:    v_writelane_b32 v40, s48, 15
+; GCN-NEXT:    v_writelane_b32 v40, s49, 16
 ; GCN-NEXT:    s_mov_b32 s42, s14
 ; GCN-NEXT:    s_mov_b32 s43, s13
 ; GCN-NEXT:    s_mov_b32 s44, s12
@@ -606,30 +606,30 @@ define void @test_indirect_call_vgpr_ptr_arg(void(i32)* %fptr) {
 ; GCN-NEXT:    s_cbranch_execnz .LBB3_1
 ; GCN-NEXT:  ; %bb.2:
 ; GCN-NEXT:    s_mov_b64 exec, s[46:47]
-; GCN-NEXT:    v_readlane_b32 s4, v40, 15
-; GCN-NEXT:    v_readlane_b32 s5, v40, 16
-; GCN-NEXT:    v_readlane_b32 s49, v40, 14
-; GCN-NEXT:    v_readlane_b32 s48, v40, 13
-; GCN-NEXT:    v_readlane_b32 s47, v40, 12
-; GCN-NEXT:    v_readlane_b32 s46, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    v_readlane_b32 s49, v40, 16
+; GCN-NEXT:    v_readlane_b32 s48, v40, 15
+; GCN-NEXT:    v_readlane_b32 s47, v40, 14
+; GCN-NEXT:    v_readlane_b32 s46, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 17
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: test_indirect_call_vgpr_ptr_arg:
 ; GISEL:       ; %bb.0:
@@ -640,21 +640,23 @@ define void @test_indirect_call_vgpr_ptr_arg(void(i32)* %fptr) {
 ; GISEL-NEXT:    v_writelane_b32 v40, s33, 17
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 16
 ; GISEL-NEXT:    s_mov_b32 s42, s14
 ; GISEL-NEXT:    s_mov_b32 s43, s13
 ; GISEL-NEXT:    s_mov_b32 s44, s12
@@ -662,8 +664,6 @@ define void @test_indirect_call_vgpr_ptr_arg(void(i32)* %fptr) {
 ; GISEL-NEXT:    s_mov_b64 s[36:37], s[8:9]
 ; GISEL-NEXT:    s_mov_b64 s[38:39], s[6:7]
 ; GISEL-NEXT:    s_mov_b64 s[40:41], s[4:5]
-; GISEL-NEXT:    v_writelane_b32 v40, s30, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s31, 16
 ; GISEL-NEXT:    s_mov_b64 s[46:47], exec
 ; GISEL-NEXT:  .LBB3_1: ; =>This Inner Loop Header: Depth=1
 ; GISEL-NEXT:    v_readfirstlane_b32 s16, v0
@@ -685,30 +685,30 @@ define void @test_indirect_call_vgpr_ptr_arg(void(i32)* %fptr) {
 ; GISEL-NEXT:    s_cbranch_execnz .LBB3_1
 ; GISEL-NEXT:  ; %bb.2:
 ; GISEL-NEXT:    s_mov_b64 exec, s[46:47]
-; GISEL-NEXT:    v_readlane_b32 s4, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s5, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
 ; GISEL-NEXT:    v_readlane_b32 s33, v40, 17
-; GISEL-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[4:5]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   call void %fptr(i32 123)
   ret void
 }
@@ -723,23 +723,23 @@ define i32 @test_indirect_call_vgpr_ptr_ret(i32()* %fptr) {
 ; GCN-NEXT:    v_writelane_b32 v40, s33, 17
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s46, 11
-; GCN-NEXT:    v_writelane_b32 v40, s47, 12
-; GCN-NEXT:    v_writelane_b32 v40, s48, 13
-; GCN-NEXT:    v_writelane_b32 v40, s49, 14
-; GCN-NEXT:    v_writelane_b32 v40, s30, 15
-; GCN-NEXT:    v_writelane_b32 v40, s31, 16
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s46, 13
+; GCN-NEXT:    v_writelane_b32 v40, s47, 14
+; GCN-NEXT:    v_writelane_b32 v40, s48, 15
+; GCN-NEXT:    v_writelane_b32 v40, s49, 16
 ; GCN-NEXT:    s_mov_b32 s42, s14
 ; GCN-NEXT:    s_mov_b32 s43, s13
 ; GCN-NEXT:    s_mov_b32 s44, s12
@@ -769,30 +769,30 @@ define i32 @test_indirect_call_vgpr_ptr_ret(i32()* %fptr) {
 ; GCN-NEXT:  ; %bb.2:
 ; GCN-NEXT:    s_mov_b64 exec, s[46:47]
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
-; GCN-NEXT:    v_readlane_b32 s4, v40, 15
-; GCN-NEXT:    v_readlane_b32 s5, v40, 16
-; GCN-NEXT:    v_readlane_b32 s49, v40, 14
-; GCN-NEXT:    v_readlane_b32 s48, v40, 13
-; GCN-NEXT:    v_readlane_b32 s47, v40, 12
-; GCN-NEXT:    v_readlane_b32 s46, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    v_readlane_b32 s49, v40, 16
+; GCN-NEXT:    v_readlane_b32 s48, v40, 15
+; GCN-NEXT:    v_readlane_b32 s47, v40, 14
+; GCN-NEXT:    v_readlane_b32 s46, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 17
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: test_indirect_call_vgpr_ptr_ret:
 ; GISEL:       ; %bb.0:
@@ -803,21 +803,23 @@ define i32 @test_indirect_call_vgpr_ptr_ret(i32()* %fptr) {
 ; GISEL-NEXT:    v_writelane_b32 v40, s33, 17
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 16
 ; GISEL-NEXT:    s_mov_b32 s42, s14
 ; GISEL-NEXT:    s_mov_b32 s43, s13
 ; GISEL-NEXT:    s_mov_b32 s44, s12
@@ -825,8 +827,6 @@ define i32 @test_indirect_call_vgpr_ptr_ret(i32()* %fptr) {
 ; GISEL-NEXT:    s_mov_b64 s[36:37], s[8:9]
 ; GISEL-NEXT:    s_mov_b64 s[38:39], s[6:7]
 ; GISEL-NEXT:    s_mov_b64 s[40:41], s[4:5]
-; GISEL-NEXT:    v_writelane_b32 v40, s30, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s31, 16
 ; GISEL-NEXT:    s_mov_b64 s[46:47], exec
 ; GISEL-NEXT:  .LBB4_1: ; =>This Inner Loop Header: Depth=1
 ; GISEL-NEXT:    v_readfirstlane_b32 s16, v0
@@ -849,30 +849,30 @@ define i32 @test_indirect_call_vgpr_ptr_ret(i32()* %fptr) {
 ; GISEL-NEXT:  ; %bb.2:
 ; GISEL-NEXT:    s_mov_b64 exec, s[46:47]
 ; GISEL-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
-; GISEL-NEXT:    v_readlane_b32 s4, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s5, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
 ; GISEL-NEXT:    v_readlane_b32 s33, v40, 17
-; GISEL-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[4:5]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %a = call i32 %fptr()
   %b = add i32 %a, 1
   ret i32 %b
@@ -888,23 +888,25 @@ define void @test_indirect_call_vgpr_ptr_in_branch(void()* %fptr, i1 %cond) {
 ; GCN-NEXT:    v_writelane_b32 v40, s33, 19
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s46, 11
-; GCN-NEXT:    v_writelane_b32 v40, s47, 12
-; GCN-NEXT:    v_writelane_b32 v40, s48, 13
-; GCN-NEXT:    v_writelane_b32 v40, s49, 14
-; GCN-NEXT:    v_writelane_b32 v40, s50, 15
-; GCN-NEXT:    v_writelane_b32 v40, s51, 16
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s46, 13
+; GCN-NEXT:    v_writelane_b32 v40, s47, 14
+; GCN-NEXT:    v_writelane_b32 v40, s48, 15
+; GCN-NEXT:    v_writelane_b32 v40, s49, 16
+; GCN-NEXT:    v_writelane_b32 v40, s50, 17
+; GCN-NEXT:    v_writelane_b32 v40, s51, 18
 ; GCN-NEXT:    s_mov_b32 s42, s14
 ; GCN-NEXT:    s_mov_b32 s43, s13
 ; GCN-NEXT:    s_mov_b32 s44, s12
@@ -917,8 +919,6 @@ define void @test_indirect_call_vgpr_ptr_in_branch(void()* %fptr, i1 %cond) {
 ; GCN-NEXT:    s_and_saveexec_b64 s[46:47], vcc
 ; GCN-NEXT:    s_cbranch_execz .LBB5_4
 ; GCN-NEXT:  ; %bb.1: ; %bb1
-; GCN-NEXT:    v_writelane_b32 v40, s30, 17
-; GCN-NEXT:    v_writelane_b32 v40, s31, 18
 ; GCN-NEXT:    s_mov_b64 s[48:49], exec
 ; GCN-NEXT:  .LBB5_2: ; =>This Inner Loop Header: Depth=1
 ; GCN-NEXT:    v_readfirstlane_b32 s16, v0
@@ -939,27 +939,27 @@ define void @test_indirect_call_vgpr_ptr_in_branch(void()* %fptr, i1 %cond) {
 ; GCN-NEXT:    s_cbranch_execnz .LBB5_2
 ; GCN-NEXT:  ; %bb.3:
 ; GCN-NEXT:    s_mov_b64 exec, s[48:49]
-; GCN-NEXT:    v_readlane_b32 s30, v40, 17
-; GCN-NEXT:    v_readlane_b32 s31, v40, 18
 ; GCN-NEXT:  .LBB5_4: ; %bb2
 ; GCN-NEXT:    s_or_b64 exec, exec, s[46:47]
-; GCN-NEXT:    v_readlane_b32 s51, v40, 16
-; GCN-NEXT:    v_readlane_b32 s50, v40, 15
-; GCN-NEXT:    v_readlane_b32 s49, v40, 14
-; GCN-NEXT:    v_readlane_b32 s48, v40, 13
-; GCN-NEXT:    v_readlane_b32 s47, v40, 12
-; GCN-NEXT:    v_readlane_b32 s46, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    v_readlane_b32 s51, v40, 18
+; GCN-NEXT:    v_readlane_b32 s50, v40, 17
+; GCN-NEXT:    v_readlane_b32 s49, v40, 16
+; GCN-NEXT:    v_readlane_b32 s48, v40, 15
+; GCN-NEXT:    v_readlane_b32 s47, v40, 14
+; GCN-NEXT:    v_readlane_b32 s46, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 19
 ; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
@@ -977,23 +977,25 @@ define void @test_indirect_call_vgpr_ptr_in_branch(void()* %fptr, i1 %cond) {
 ; GISEL-NEXT:    v_writelane_b32 v40, s33, 19
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 14
-; GISEL-NEXT:    v_writelane_b32 v40, s50, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s51, 16
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 16
+; GISEL-NEXT:    v_writelane_b32 v40, s50, 17
+; GISEL-NEXT:    v_writelane_b32 v40, s51, 18
 ; GISEL-NEXT:    s_mov_b32 s42, s14
 ; GISEL-NEXT:    s_mov_b32 s43, s13
 ; GISEL-NEXT:    s_mov_b32 s44, s12
@@ -1006,8 +1008,6 @@ define void @test_indirect_call_vgpr_ptr_in_branch(void()* %fptr, i1 %cond) {
 ; GISEL-NEXT:    s_and_saveexec_b64 s[46:47], vcc
 ; GISEL-NEXT:    s_cbranch_execz .LBB5_4
 ; GISEL-NEXT:  ; %bb.1: ; %bb1
-; GISEL-NEXT:    v_writelane_b32 v40, s30, 17
-; GISEL-NEXT:    v_writelane_b32 v40, s31, 18
 ; GISEL-NEXT:    s_mov_b64 s[48:49], exec
 ; GISEL-NEXT:  .LBB5_2: ; =>This Inner Loop Header: Depth=1
 ; GISEL-NEXT:    v_readfirstlane_b32 s16, v0
@@ -1028,27 +1028,27 @@ define void @test_indirect_call_vgpr_ptr_in_branch(void()* %fptr, i1 %cond) {
 ; GISEL-NEXT:    s_cbranch_execnz .LBB5_2
 ; GISEL-NEXT:  ; %bb.3:
 ; GISEL-NEXT:    s_mov_b64 exec, s[48:49]
-; GISEL-NEXT:    v_readlane_b32 s30, v40, 17
-; GISEL-NEXT:    v_readlane_b32 s31, v40, 18
 ; GISEL-NEXT:  .LBB5_4: ; %bb2
 ; GISEL-NEXT:    s_or_b64 exec, exec, s[46:47]
-; GISEL-NEXT:    v_readlane_b32 s51, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s50, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    v_readlane_b32 s51, v40, 18
+; GISEL-NEXT:    v_readlane_b32 s50, v40, 17
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
 ; GISEL-NEXT:    v_readlane_b32 s33, v40, 19
 ; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
@@ -1074,90 +1074,93 @@ define void @test_indirect_call_vgpr_ptr_inreg_arg(void(i32)* %fptr) {
 ; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_mov_b64 exec, s[4:5]
-; GCN-NEXT:    v_writelane_b32 v40, s33, 30
+; GCN-NEXT:    v_writelane_b32 v40, s33, 32
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s45, 11
-; GCN-NEXT:    v_writelane_b32 v40, s46, 12
-; GCN-NEXT:    v_writelane_b32 v40, s47, 13
-; GCN-NEXT:    v_writelane_b32 v40, s48, 14
-; GCN-NEXT:    v_writelane_b32 v40, s49, 15
-; GCN-NEXT:    v_writelane_b32 v40, s50, 16
-; GCN-NEXT:    v_writelane_b32 v40, s51, 17
-; GCN-NEXT:    v_writelane_b32 v40, s52, 18
-; GCN-NEXT:    v_writelane_b32 v40, s53, 19
-; GCN-NEXT:    v_writelane_b32 v40, s54, 20
-; GCN-NEXT:    v_writelane_b32 v40, s55, 21
-; GCN-NEXT:    v_writelane_b32 v40, s56, 22
-; GCN-NEXT:    v_writelane_b32 v40, s57, 23
-; GCN-NEXT:    v_writelane_b32 v40, s58, 24
-; GCN-NEXT:    v_writelane_b32 v40, s59, 25
-; GCN-NEXT:    v_writelane_b32 v40, s60, 26
-; GCN-NEXT:    v_writelane_b32 v40, s61, 27
-; GCN-NEXT:    v_writelane_b32 v40, s62, 28
-; GCN-NEXT:    v_writelane_b32 v40, s63, 29
-; GCN-NEXT:    s_mov_b64 s[6:7], s[30:31]
-; GCN-NEXT:    s_mov_b64 s[8:9], exec
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s45, 13
+; GCN-NEXT:    v_writelane_b32 v40, s46, 14
+; GCN-NEXT:    v_writelane_b32 v40, s47, 15
+; GCN-NEXT:    v_writelane_b32 v40, s48, 16
+; GCN-NEXT:    v_writelane_b32 v40, s49, 17
+; GCN-NEXT:    v_writelane_b32 v40, s50, 18
+; GCN-NEXT:    v_writelane_b32 v40, s51, 19
+; GCN-NEXT:    v_writelane_b32 v40, s52, 20
+; GCN-NEXT:    v_writelane_b32 v40, s53, 21
+; GCN-NEXT:    v_writelane_b32 v40, s54, 22
+; GCN-NEXT:    v_writelane_b32 v40, s55, 23
+; GCN-NEXT:    v_writelane_b32 v40, s56, 24
+; GCN-NEXT:    v_writelane_b32 v40, s57, 25
+; GCN-NEXT:    v_writelane_b32 v40, s58, 26
+; GCN-NEXT:    v_writelane_b32 v40, s59, 27
+; GCN-NEXT:    v_writelane_b32 v40, s60, 28
+; GCN-NEXT:    v_writelane_b32 v40, s61, 29
+; GCN-NEXT:    v_writelane_b32 v40, s62, 30
+; GCN-NEXT:    v_writelane_b32 v40, s63, 31
+; GCN-NEXT:    s_mov_b64 s[6:7], exec
 ; GCN-NEXT:    s_movk_i32 s4, 0x7b
 ; GCN-NEXT:  .LBB6_1: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT:    v_readfirstlane_b32 s12, v0
-; GCN-NEXT:    v_readfirstlane_b32 s13, v1
-; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[12:13], v[0:1]
-; GCN-NEXT:    s_and_saveexec_b64 s[10:11], vcc
-; GCN-NEXT:    s_swappc_b64 s[30:31], s[12:13]
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    v_readfirstlane_b32 s11, v1
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[10:11]
 ; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1
-; GCN-NEXT:    s_xor_b64 exec, exec, s[10:11]
+; GCN-NEXT:    s_xor_b64 exec, exec, s[8:9]
 ; GCN-NEXT:    s_cbranch_execnz .LBB6_1
 ; GCN-NEXT:  ; %bb.2:
-; GCN-NEXT:    s_mov_b64 exec, s[8:9]
-; GCN-NEXT:    v_readlane_b32 s63, v40, 29
-; GCN-NEXT:    v_readlane_b32 s62, v40, 28
-; GCN-NEXT:    v_readlane_b32 s61, v40, 27
-; GCN-NEXT:    v_readlane_b32 s60, v40, 26
-; GCN-NEXT:    v_readlane_b32 s59, v40, 25
-; GCN-NEXT:    v_readlane_b32 s58, v40, 24
-; GCN-NEXT:    v_readlane_b32 s57, v40, 23
-; GCN-NEXT:    v_readlane_b32 s56, v40, 22
-; GCN-NEXT:    v_readlane_b32 s55, v40, 21
-; GCN-NEXT:    v_readlane_b32 s54, v40, 20
-; GCN-NEXT:    v_readlane_b32 s53, v40, 19
-; GCN-NEXT:    v_readlane_b32 s52, v40, 18
-; GCN-NEXT:    v_readlane_b32 s51, v40, 17
-; GCN-NEXT:    v_readlane_b32 s50, v40, 16
-; GCN-NEXT:    v_readlane_b32 s49, v40, 15
-; GCN-NEXT:    v_readlane_b32 s48, v40, 14
-; GCN-NEXT:    v_readlane_b32 s47, v40, 13
-; GCN-NEXT:    v_readlane_b32 s46, v40, 12
-; GCN-NEXT:    v_readlane_b32 s45, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    v_readlane_b32 s63, v40, 31
+; GCN-NEXT:    v_readlane_b32 s62, v40, 30
+; GCN-NEXT:    v_readlane_b32 s61, v40, 29
+; GCN-NEXT:    v_readlane_b32 s60, v40, 28
+; GCN-NEXT:    v_readlane_b32 s59, v40, 27
+; GCN-NEXT:    v_readlane_b32 s58, v40, 26
+; GCN-NEXT:    v_readlane_b32 s57, v40, 25
+; GCN-NEXT:    v_readlane_b32 s56, v40, 24
+; GCN-NEXT:    v_readlane_b32 s55, v40, 23
+; GCN-NEXT:    v_readlane_b32 s54, v40, 22
+; GCN-NEXT:    v_readlane_b32 s53, v40, 21
+; GCN-NEXT:    v_readlane_b32 s52, v40, 20
+; GCN-NEXT:    v_readlane_b32 s51, v40, 19
+; GCN-NEXT:    v_readlane_b32 s50, v40, 18
+; GCN-NEXT:    v_readlane_b32 s49, v40, 17
+; GCN-NEXT:    v_readlane_b32 s48, v40, 16
+; GCN-NEXT:    v_readlane_b32 s47, v40, 15
+; GCN-NEXT:    v_readlane_b32 s46, v40, 14
+; GCN-NEXT:    v_readlane_b32 s45, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
-; GCN-NEXT:    v_readlane_b32 s33, v40, 30
+; GCN-NEXT:    v_readlane_b32 s33, v40, 32
 ; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[6:7]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: test_indirect_call_vgpr_ptr_inreg_arg:
 ; GISEL:       ; %bb.0:
@@ -1165,90 +1168,93 @@ define void @test_indirect_call_vgpr_ptr_inreg_arg(void(i32)* %fptr) {
 ; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
-; GISEL-NEXT:    v_writelane_b32 v40, s33, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s33, 32
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s45, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 14
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s50, 16
-; GISEL-NEXT:    v_writelane_b32 v40, s51, 17
-; GISEL-NEXT:    v_writelane_b32 v40, s52, 18
-; GISEL-NEXT:    v_writelane_b32 v40, s53, 19
-; GISEL-NEXT:    v_writelane_b32 v40, s54, 20
-; GISEL-NEXT:    v_writelane_b32 v40, s55, 21
-; GISEL-NEXT:    v_writelane_b32 v40, s56, 22
-; GISEL-NEXT:    v_writelane_b32 v40, s57, 23
-; GISEL-NEXT:    v_writelane_b32 v40, s58, 24
-; GISEL-NEXT:    v_writelane_b32 v40, s59, 25
-; GISEL-NEXT:    v_writelane_b32 v40, s60, 26
-; GISEL-NEXT:    v_writelane_b32 v40, s61, 27
-; GISEL-NEXT:    v_writelane_b32 v40, s62, 28
-; GISEL-NEXT:    v_writelane_b32 v40, s63, 29
-; GISEL-NEXT:    s_mov_b64 s[6:7], s[30:31]
-; GISEL-NEXT:    s_mov_b64 s[8:9], exec
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s45, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 16
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 17
+; GISEL-NEXT:    v_writelane_b32 v40, s50, 18
+; GISEL-NEXT:    v_writelane_b32 v40, s51, 19
+; GISEL-NEXT:    v_writelane_b32 v40, s52, 20
+; GISEL-NEXT:    v_writelane_b32 v40, s53, 21
+; GISEL-NEXT:    v_writelane_b32 v40, s54, 22
+; GISEL-NEXT:    v_writelane_b32 v40, s55, 23
+; GISEL-NEXT:    v_writelane_b32 v40, s56, 24
+; GISEL-NEXT:    v_writelane_b32 v40, s57, 25
+; GISEL-NEXT:    v_writelane_b32 v40, s58, 26
+; GISEL-NEXT:    v_writelane_b32 v40, s59, 27
+; GISEL-NEXT:    v_writelane_b32 v40, s60, 28
+; GISEL-NEXT:    v_writelane_b32 v40, s61, 29
+; GISEL-NEXT:    v_writelane_b32 v40, s62, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s63, 31
+; GISEL-NEXT:    s_mov_b64 s[6:7], exec
 ; GISEL-NEXT:    s_movk_i32 s4, 0x7b
 ; GISEL-NEXT:  .LBB6_1: ; =>This Inner Loop Header: Depth=1
-; GISEL-NEXT:    v_readfirstlane_b32 s10, v0
-; GISEL-NEXT:    v_readfirstlane_b32 s11, v1
-; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[0:1]
-; GISEL-NEXT:    s_and_saveexec_b64 s[12:13], vcc
-; GISEL-NEXT:    s_swappc_b64 s[30:31], s[10:11]
+; GISEL-NEXT:    v_readfirstlane_b32 s8, v0
+; GISEL-NEXT:    v_readfirstlane_b32 s9, v1
+; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
+; GISEL-NEXT:    s_and_saveexec_b64 s[10:11], vcc
+; GISEL-NEXT:    s_swappc_b64 s[30:31], s[8:9]
 ; GISEL-NEXT:    ; implicit-def: $vgpr0_vgpr1
-; GISEL-NEXT:    s_xor_b64 exec, exec, s[12:13]
+; GISEL-NEXT:    s_xor_b64 exec, exec, s[10:11]
 ; GISEL-NEXT:    s_cbranch_execnz .LBB6_1
 ; GISEL-NEXT:  ; %bb.2:
-; GISEL-NEXT:    s_mov_b64 exec, s[8:9]
-; GISEL-NEXT:    v_readlane_b32 s63, v40, 29
-; GISEL-NEXT:    v_readlane_b32 s62, v40, 28
-; GISEL-NEXT:    v_readlane_b32 s61, v40, 27
-; GISEL-NEXT:    v_readlane_b32 s60, v40, 26
-; GISEL-NEXT:    v_readlane_b32 s59, v40, 25
-; GISEL-NEXT:    v_readlane_b32 s58, v40, 24
-; GISEL-NEXT:    v_readlane_b32 s57, v40, 23
-; GISEL-NEXT:    v_readlane_b32 s56, v40, 22
-; GISEL-NEXT:    v_readlane_b32 s55, v40, 21
-; GISEL-NEXT:    v_readlane_b32 s54, v40, 20
-; GISEL-NEXT:    v_readlane_b32 s53, v40, 19
-; GISEL-NEXT:    v_readlane_b32 s52, v40, 18
-; GISEL-NEXT:    v_readlane_b32 s51, v40, 17
-; GISEL-NEXT:    v_readlane_b32 s50, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s45, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    v_readlane_b32 s63, v40, 31
+; GISEL-NEXT:    v_readlane_b32 s62, v40, 30
+; GISEL-NEXT:    v_readlane_b32 s61, v40, 29
+; GISEL-NEXT:    v_readlane_b32 s60, v40, 28
+; GISEL-NEXT:    v_readlane_b32 s59, v40, 27
+; GISEL-NEXT:    v_readlane_b32 s58, v40, 26
+; GISEL-NEXT:    v_readlane_b32 s57, v40, 25
+; GISEL-NEXT:    v_readlane_b32 s56, v40, 24
+; GISEL-NEXT:    v_readlane_b32 s55, v40, 23
+; GISEL-NEXT:    v_readlane_b32 s54, v40, 22
+; GISEL-NEXT:    v_readlane_b32 s53, v40, 21
+; GISEL-NEXT:    v_readlane_b32 s52, v40, 20
+; GISEL-NEXT:    v_readlane_b32 s51, v40, 19
+; GISEL-NEXT:    v_readlane_b32 s50, v40, 18
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 17
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s45, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
-; GISEL-NEXT:    v_readlane_b32 s33, v40, 30
+; GISEL-NEXT:    v_readlane_b32 s33, v40, 32
 ; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
 ; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[6:7]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   call amdgpu_gfx void %fptr(i32 inreg 123)
   ret void
 }
@@ -1260,94 +1266,97 @@ define i32 @test_indirect_call_vgpr_ptr_arg_and_reuse(i32 %i, void(i32)* %fptr)
 ; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_mov_b64 exec, s[4:5]
-; GCN-NEXT:    v_writelane_b32 v40, s33, 30
+; GCN-NEXT:    v_writelane_b32 v40, s33, 32
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
 ; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 ; 4-byte Folded Spill
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s45, 11
-; GCN-NEXT:    v_writelane_b32 v40, s46, 12
-; GCN-NEXT:    v_writelane_b32 v40, s47, 13
-; GCN-NEXT:    v_writelane_b32 v40, s48, 14
-; GCN-NEXT:    v_writelane_b32 v40, s49, 15
-; GCN-NEXT:    v_writelane_b32 v40, s50, 16
-; GCN-NEXT:    v_writelane_b32 v40, s51, 17
-; GCN-NEXT:    v_writelane_b32 v40, s52, 18
-; GCN-NEXT:    v_writelane_b32 v40, s53, 19
-; GCN-NEXT:    v_writelane_b32 v40, s54, 20
-; GCN-NEXT:    v_writelane_b32 v40, s55, 21
-; GCN-NEXT:    v_writelane_b32 v40, s56, 22
-; GCN-NEXT:    v_writelane_b32 v40, s57, 23
-; GCN-NEXT:    v_writelane_b32 v40, s58, 24
-; GCN-NEXT:    v_writelane_b32 v40, s59, 25
-; GCN-NEXT:    v_writelane_b32 v40, s60, 26
-; GCN-NEXT:    v_writelane_b32 v40, s61, 27
-; GCN-NEXT:    v_writelane_b32 v40, s62, 28
-; GCN-NEXT:    v_writelane_b32 v40, s63, 29
-; GCN-NEXT:    s_mov_b64 s[4:5], s[30:31]
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s45, 13
+; GCN-NEXT:    v_writelane_b32 v40, s46, 14
+; GCN-NEXT:    v_writelane_b32 v40, s47, 15
+; GCN-NEXT:    v_writelane_b32 v40, s48, 16
+; GCN-NEXT:    v_writelane_b32 v40, s49, 17
+; GCN-NEXT:    v_writelane_b32 v40, s50, 18
+; GCN-NEXT:    v_writelane_b32 v40, s51, 19
+; GCN-NEXT:    v_writelane_b32 v40, s52, 20
+; GCN-NEXT:    v_writelane_b32 v40, s53, 21
+; GCN-NEXT:    v_writelane_b32 v40, s54, 22
+; GCN-NEXT:    v_writelane_b32 v40, s55, 23
+; GCN-NEXT:    v_writelane_b32 v40, s56, 24
+; GCN-NEXT:    v_writelane_b32 v40, s57, 25
+; GCN-NEXT:    v_writelane_b32 v40, s58, 26
+; GCN-NEXT:    v_writelane_b32 v40, s59, 27
+; GCN-NEXT:    v_writelane_b32 v40, s60, 28
+; GCN-NEXT:    v_writelane_b32 v40, s61, 29
+; GCN-NEXT:    v_writelane_b32 v40, s62, 30
+; GCN-NEXT:    v_writelane_b32 v40, s63, 31
 ; GCN-NEXT:    v_mov_b32_e32 v41, v0
-; GCN-NEXT:    s_mov_b64 s[6:7], exec
+; GCN-NEXT:    s_mov_b64 s[4:5], exec
 ; GCN-NEXT:  .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT:    v_readfirstlane_b32 s10, v1
-; GCN-NEXT:    v_readfirstlane_b32 s11, v2
-; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[1:2]
-; GCN-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GCN-NEXT:    v_readfirstlane_b32 s8, v1
+; GCN-NEXT:    v_readfirstlane_b32 s9, v2
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[1:2]
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
 ; GCN-NEXT:    v_mov_b32_e32 v0, v41
-; GCN-NEXT:    s_swappc_b64 s[30:31], s[10:11]
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[8:9]
 ; GCN-NEXT:    ; implicit-def: $vgpr1_vgpr2
-; GCN-NEXT:    s_xor_b64 exec, exec, s[8:9]
+; GCN-NEXT:    s_xor_b64 exec, exec, s[6:7]
 ; GCN-NEXT:    s_cbranch_execnz .LBB7_1
 ; GCN-NEXT:  ; %bb.2:
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    v_mov_b32_e32 v0, v41
-; GCN-NEXT:    v_readlane_b32 s63, v40, 29
-; GCN-NEXT:    v_readlane_b32 s62, v40, 28
-; GCN-NEXT:    v_readlane_b32 s61, v40, 27
-; GCN-NEXT:    v_readlane_b32 s60, v40, 26
-; GCN-NEXT:    v_readlane_b32 s59, v40, 25
-; GCN-NEXT:    v_readlane_b32 s58, v40, 24
-; GCN-NEXT:    v_readlane_b32 s57, v40, 23
-; GCN-NEXT:    v_readlane_b32 s56, v40, 22
-; GCN-NEXT:    v_readlane_b32 s55, v40, 21
-; GCN-NEXT:    v_readlane_b32 s54, v40, 20
-; GCN-NEXT:    v_readlane_b32 s53, v40, 19
-; GCN-NEXT:    v_readlane_b32 s52, v40, 18
-; GCN-NEXT:    v_readlane_b32 s51, v40, 17
-; GCN-NEXT:    v_readlane_b32 s50, v40, 16
-; GCN-NEXT:    v_readlane_b32 s49, v40, 15
-; GCN-NEXT:    v_readlane_b32 s48, v40, 14
-; GCN-NEXT:    v_readlane_b32 s47, v40, 13
-; GCN-NEXT:    v_readlane_b32 s46, v40, 12
-; GCN-NEXT:    v_readlane_b32 s45, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    v_readlane_b32 s63, v40, 31
+; GCN-NEXT:    v_readlane_b32 s62, v40, 30
+; GCN-NEXT:    v_readlane_b32 s61, v40, 29
+; GCN-NEXT:    v_readlane_b32 s60, v40, 28
+; GCN-NEXT:    v_readlane_b32 s59, v40, 27
+; GCN-NEXT:    v_readlane_b32 s58, v40, 26
+; GCN-NEXT:    v_readlane_b32 s57, v40, 25
+; GCN-NEXT:    v_readlane_b32 s56, v40, 24
+; GCN-NEXT:    v_readlane_b32 s55, v40, 23
+; GCN-NEXT:    v_readlane_b32 s54, v40, 22
+; GCN-NEXT:    v_readlane_b32 s53, v40, 21
+; GCN-NEXT:    v_readlane_b32 s52, v40, 20
+; GCN-NEXT:    v_readlane_b32 s51, v40, 19
+; GCN-NEXT:    v_readlane_b32 s50, v40, 18
+; GCN-NEXT:    v_readlane_b32 s49, v40, 17
+; GCN-NEXT:    v_readlane_b32 s48, v40, 16
+; GCN-NEXT:    v_readlane_b32 s47, v40, 15
+; GCN-NEXT:    v_readlane_b32 s46, v40, 14
+; GCN-NEXT:    v_readlane_b32 s45, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    buffer_load_dword v41, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
-; GCN-NEXT:    v_readlane_b32 s33, v40, 30
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    v_readlane_b32 s33, v40, 32
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: test_indirect_call_vgpr_ptr_arg_and_reuse:
 ; GISEL:       ; %bb.0:
@@ -1355,94 +1364,97 @@ define i32 @test_indirect_call_vgpr_ptr_arg_and_reuse(i32 %i, void(i32)* %fptr)
 ; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
-; GISEL-NEXT:    v_writelane_b32 v40, s33, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s33, 32
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
 ; GISEL-NEXT:    buffer_store_dword v41, off, s[0:3], s33 ; 4-byte Folded Spill
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s45, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 14
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s50, 16
-; GISEL-NEXT:    v_writelane_b32 v40, s51, 17
-; GISEL-NEXT:    v_writelane_b32 v40, s52, 18
-; GISEL-NEXT:    v_writelane_b32 v40, s53, 19
-; GISEL-NEXT:    v_writelane_b32 v40, s54, 20
-; GISEL-NEXT:    v_writelane_b32 v40, s55, 21
-; GISEL-NEXT:    v_writelane_b32 v40, s56, 22
-; GISEL-NEXT:    v_writelane_b32 v40, s57, 23
-; GISEL-NEXT:    v_writelane_b32 v40, s58, 24
-; GISEL-NEXT:    v_writelane_b32 v40, s59, 25
-; GISEL-NEXT:    v_writelane_b32 v40, s60, 26
-; GISEL-NEXT:    v_writelane_b32 v40, s61, 27
-; GISEL-NEXT:    v_writelane_b32 v40, s62, 28
-; GISEL-NEXT:    v_writelane_b32 v40, s63, 29
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s45, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 16
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 17
+; GISEL-NEXT:    v_writelane_b32 v40, s50, 18
+; GISEL-NEXT:    v_writelane_b32 v40, s51, 19
+; GISEL-NEXT:    v_writelane_b32 v40, s52, 20
+; GISEL-NEXT:    v_writelane_b32 v40, s53, 21
+; GISEL-NEXT:    v_writelane_b32 v40, s54, 22
+; GISEL-NEXT:    v_writelane_b32 v40, s55, 23
+; GISEL-NEXT:    v_writelane_b32 v40, s56, 24
+; GISEL-NEXT:    v_writelane_b32 v40, s57, 25
+; GISEL-NEXT:    v_writelane_b32 v40, s58, 26
+; GISEL-NEXT:    v_writelane_b32 v40, s59, 27
+; GISEL-NEXT:    v_writelane_b32 v40, s60, 28
+; GISEL-NEXT:    v_writelane_b32 v40, s61, 29
+; GISEL-NEXT:    v_writelane_b32 v40, s62, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s63, 31
 ; GISEL-NEXT:    v_mov_b32_e32 v41, v0
-; GISEL-NEXT:    s_mov_b64 s[4:5], s[30:31]
-; GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GISEL-NEXT:  .LBB7_1: ; =>This Inner Loop Header: Depth=1
-; GISEL-NEXT:    v_readfirstlane_b32 s8, v1
-; GISEL-NEXT:    v_readfirstlane_b32 s9, v2
-; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[1:2]
-; GISEL-NEXT:    s_and_saveexec_b64 s[10:11], vcc
+; GISEL-NEXT:    v_readfirstlane_b32 s6, v1
+; GISEL-NEXT:    v_readfirstlane_b32 s7, v2
+; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[1:2]
+; GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
 ; GISEL-NEXT:    v_mov_b32_e32 v0, v41
-; GISEL-NEXT:    s_swappc_b64 s[30:31], s[8:9]
+; GISEL-NEXT:    s_swappc_b64 s[30:31], s[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr1_vgpr2
-; GISEL-NEXT:    s_xor_b64 exec, exec, s[10:11]
+; GISEL-NEXT:    s_xor_b64 exec, exec, s[8:9]
 ; GISEL-NEXT:    s_cbranch_execnz .LBB7_1
 ; GISEL-NEXT:  ; %bb.2:
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    v_mov_b32_e32 v0, v41
-; GISEL-NEXT:    v_readlane_b32 s63, v40, 29
-; GISEL-NEXT:    v_readlane_b32 s62, v40, 28
-; GISEL-NEXT:    v_readlane_b32 s61, v40, 27
-; GISEL-NEXT:    v_readlane_b32 s60, v40, 26
-; GISEL-NEXT:    v_readlane_b32 s59, v40, 25
-; GISEL-NEXT:    v_readlane_b32 s58, v40, 24
-; GISEL-NEXT:    v_readlane_b32 s57, v40, 23
-; GISEL-NEXT:    v_readlane_b32 s56, v40, 22
-; GISEL-NEXT:    v_readlane_b32 s55, v40, 21
-; GISEL-NEXT:    v_readlane_b32 s54, v40, 20
-; GISEL-NEXT:    v_readlane_b32 s53, v40, 19
-; GISEL-NEXT:    v_readlane_b32 s52, v40, 18
-; GISEL-NEXT:    v_readlane_b32 s51, v40, 17
-; GISEL-NEXT:    v_readlane_b32 s50, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s45, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    v_readlane_b32 s63, v40, 31
+; GISEL-NEXT:    v_readlane_b32 s62, v40, 30
+; GISEL-NEXT:    v_readlane_b32 s61, v40, 29
+; GISEL-NEXT:    v_readlane_b32 s60, v40, 28
+; GISEL-NEXT:    v_readlane_b32 s59, v40, 27
+; GISEL-NEXT:    v_readlane_b32 s58, v40, 26
+; GISEL-NEXT:    v_readlane_b32 s57, v40, 25
+; GISEL-NEXT:    v_readlane_b32 s56, v40, 24
+; GISEL-NEXT:    v_readlane_b32 s55, v40, 23
+; GISEL-NEXT:    v_readlane_b32 s54, v40, 22
+; GISEL-NEXT:    v_readlane_b32 s53, v40, 21
+; GISEL-NEXT:    v_readlane_b32 s52, v40, 20
+; GISEL-NEXT:    v_readlane_b32 s51, v40, 19
+; GISEL-NEXT:    v_readlane_b32 s50, v40, 18
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 17
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s45, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    buffer_load_dword v41, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
-; GISEL-NEXT:    v_readlane_b32 s33, v40, 30
-; GISEL-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GISEL-NEXT:    v_readlane_b32 s33, v40, 32
+; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[4:5]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   call amdgpu_gfx void %fptr(i32 %i)
   ret i32 %i
 }
@@ -1458,92 +1470,95 @@ define i32 @test_indirect_call_vgpr_ptr_arg_and_return(i32 %i, i32(i32)* %fptr)
 ; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_mov_b64 exec, s[4:5]
-; GCN-NEXT:    v_writelane_b32 v40, s33, 30
+; GCN-NEXT:    v_writelane_b32 v40, s33, 32
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s45, 11
-; GCN-NEXT:    v_writelane_b32 v40, s46, 12
-; GCN-NEXT:    v_writelane_b32 v40, s47, 13
-; GCN-NEXT:    v_writelane_b32 v40, s48, 14
-; GCN-NEXT:    v_writelane_b32 v40, s49, 15
-; GCN-NEXT:    v_writelane_b32 v40, s50, 16
-; GCN-NEXT:    v_writelane_b32 v40, s51, 17
-; GCN-NEXT:    v_writelane_b32 v40, s52, 18
-; GCN-NEXT:    v_writelane_b32 v40, s53, 19
-; GCN-NEXT:    v_writelane_b32 v40, s54, 20
-; GCN-NEXT:    v_writelane_b32 v40, s55, 21
-; GCN-NEXT:    v_writelane_b32 v40, s56, 22
-; GCN-NEXT:    v_writelane_b32 v40, s57, 23
-; GCN-NEXT:    v_writelane_b32 v40, s58, 24
-; GCN-NEXT:    v_writelane_b32 v40, s59, 25
-; GCN-NEXT:    v_writelane_b32 v40, s60, 26
-; GCN-NEXT:    v_writelane_b32 v40, s61, 27
-; GCN-NEXT:    v_writelane_b32 v40, s62, 28
-; GCN-NEXT:    v_writelane_b32 v40, s63, 29
-; GCN-NEXT:    s_mov_b64 s[4:5], s[30:31]
-; GCN-NEXT:    s_mov_b64 s[6:7], exec
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s45, 13
+; GCN-NEXT:    v_writelane_b32 v40, s46, 14
+; GCN-NEXT:    v_writelane_b32 v40, s47, 15
+; GCN-NEXT:    v_writelane_b32 v40, s48, 16
+; GCN-NEXT:    v_writelane_b32 v40, s49, 17
+; GCN-NEXT:    v_writelane_b32 v40, s50, 18
+; GCN-NEXT:    v_writelane_b32 v40, s51, 19
+; GCN-NEXT:    v_writelane_b32 v40, s52, 20
+; GCN-NEXT:    v_writelane_b32 v40, s53, 21
+; GCN-NEXT:    v_writelane_b32 v40, s54, 22
+; GCN-NEXT:    v_writelane_b32 v40, s55, 23
+; GCN-NEXT:    v_writelane_b32 v40, s56, 24
+; GCN-NEXT:    v_writelane_b32 v40, s57, 25
+; GCN-NEXT:    v_writelane_b32 v40, s58, 26
+; GCN-NEXT:    v_writelane_b32 v40, s59, 27
+; GCN-NEXT:    v_writelane_b32 v40, s60, 28
+; GCN-NEXT:    v_writelane_b32 v40, s61, 29
+; GCN-NEXT:    v_writelane_b32 v40, s62, 30
+; GCN-NEXT:    v_writelane_b32 v40, s63, 31
+; GCN-NEXT:    s_mov_b64 s[4:5], exec
 ; GCN-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT:    v_readfirstlane_b32 s10, v1
-; GCN-NEXT:    v_readfirstlane_b32 s11, v2
-; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[1:2]
-; GCN-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-NEXT:    s_swappc_b64 s[30:31], s[10:11]
+; GCN-NEXT:    v_readfirstlane_b32 s8, v1
+; GCN-NEXT:    v_readfirstlane_b32 s9, v2
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[1:2]
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[8:9]
 ; GCN-NEXT:    v_mov_b32_e32 v3, v0
 ; GCN-NEXT:    ; implicit-def: $vgpr1_vgpr2
 ; GCN-NEXT:    ; implicit-def: $vgpr0
-; GCN-NEXT:    s_xor_b64 exec, exec, s[8:9]
+; GCN-NEXT:    s_xor_b64 exec, exec, s[6:7]
 ; GCN-NEXT:    s_cbranch_execnz .LBB8_1
 ; GCN-NEXT:  ; %bb.2:
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    v_mov_b32_e32 v0, v3
-; GCN-NEXT:    v_readlane_b32 s63, v40, 29
-; GCN-NEXT:    v_readlane_b32 s62, v40, 28
-; GCN-NEXT:    v_readlane_b32 s61, v40, 27
-; GCN-NEXT:    v_readlane_b32 s60, v40, 26
-; GCN-NEXT:    v_readlane_b32 s59, v40, 25
-; GCN-NEXT:    v_readlane_b32 s58, v40, 24
-; GCN-NEXT:    v_readlane_b32 s57, v40, 23
-; GCN-NEXT:    v_readlane_b32 s56, v40, 22
-; GCN-NEXT:    v_readlane_b32 s55, v40, 21
-; GCN-NEXT:    v_readlane_b32 s54, v40, 20
-; GCN-NEXT:    v_readlane_b32 s53, v40, 19
-; GCN-NEXT:    v_readlane_b32 s52, v40, 18
-; GCN-NEXT:    v_readlane_b32 s51, v40, 17
-; GCN-NEXT:    v_readlane_b32 s50, v40, 16
-; GCN-NEXT:    v_readlane_b32 s49, v40, 15
-; GCN-NEXT:    v_readlane_b32 s48, v40, 14
-; GCN-NEXT:    v_readlane_b32 s47, v40, 13
-; GCN-NEXT:    v_readlane_b32 s46, v40, 12
-; GCN-NEXT:    v_readlane_b32 s45, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    v_readlane_b32 s63, v40, 31
+; GCN-NEXT:    v_readlane_b32 s62, v40, 30
+; GCN-NEXT:    v_readlane_b32 s61, v40, 29
+; GCN-NEXT:    v_readlane_b32 s60, v40, 28
+; GCN-NEXT:    v_readlane_b32 s59, v40, 27
+; GCN-NEXT:    v_readlane_b32 s58, v40, 26
+; GCN-NEXT:    v_readlane_b32 s57, v40, 25
+; GCN-NEXT:    v_readlane_b32 s56, v40, 24
+; GCN-NEXT:    v_readlane_b32 s55, v40, 23
+; GCN-NEXT:    v_readlane_b32 s54, v40, 22
+; GCN-NEXT:    v_readlane_b32 s53, v40, 21
+; GCN-NEXT:    v_readlane_b32 s52, v40, 20
+; GCN-NEXT:    v_readlane_b32 s51, v40, 19
+; GCN-NEXT:    v_readlane_b32 s50, v40, 18
+; GCN-NEXT:    v_readlane_b32 s49, v40, 17
+; GCN-NEXT:    v_readlane_b32 s48, v40, 16
+; GCN-NEXT:    v_readlane_b32 s47, v40, 15
+; GCN-NEXT:    v_readlane_b32 s46, v40, 14
+; GCN-NEXT:    v_readlane_b32 s45, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
-; GCN-NEXT:    v_readlane_b32 s33, v40, 30
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    v_readlane_b32 s33, v40, 32
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: test_indirect_call_vgpr_ptr_arg_and_return:
 ; GISEL:       ; %bb.0:
@@ -1551,92 +1566,95 @@ define i32 @test_indirect_call_vgpr_ptr_arg_and_return(i32 %i, i32(i32)* %fptr)
 ; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
-; GISEL-NEXT:    v_writelane_b32 v40, s33, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s33, 32
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s45, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 14
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s50, 16
-; GISEL-NEXT:    v_writelane_b32 v40, s51, 17
-; GISEL-NEXT:    v_writelane_b32 v40, s52, 18
-; GISEL-NEXT:    v_writelane_b32 v40, s53, 19
-; GISEL-NEXT:    v_writelane_b32 v40, s54, 20
-; GISEL-NEXT:    v_writelane_b32 v40, s55, 21
-; GISEL-NEXT:    v_writelane_b32 v40, s56, 22
-; GISEL-NEXT:    v_writelane_b32 v40, s57, 23
-; GISEL-NEXT:    v_writelane_b32 v40, s58, 24
-; GISEL-NEXT:    v_writelane_b32 v40, s59, 25
-; GISEL-NEXT:    v_writelane_b32 v40, s60, 26
-; GISEL-NEXT:    v_writelane_b32 v40, s61, 27
-; GISEL-NEXT:    v_writelane_b32 v40, s62, 28
-; GISEL-NEXT:    v_writelane_b32 v40, s63, 29
-; GISEL-NEXT:    s_mov_b64 s[4:5], s[30:31]
-; GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s45, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 16
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 17
+; GISEL-NEXT:    v_writelane_b32 v40, s50, 18
+; GISEL-NEXT:    v_writelane_b32 v40, s51, 19
+; GISEL-NEXT:    v_writelane_b32 v40, s52, 20
+; GISEL-NEXT:    v_writelane_b32 v40, s53, 21
+; GISEL-NEXT:    v_writelane_b32 v40, s54, 22
+; GISEL-NEXT:    v_writelane_b32 v40, s55, 23
+; GISEL-NEXT:    v_writelane_b32 v40, s56, 24
+; GISEL-NEXT:    v_writelane_b32 v40, s57, 25
+; GISEL-NEXT:    v_writelane_b32 v40, s58, 26
+; GISEL-NEXT:    v_writelane_b32 v40, s59, 27
+; GISEL-NEXT:    v_writelane_b32 v40, s60, 28
+; GISEL-NEXT:    v_writelane_b32 v40, s61, 29
+; GISEL-NEXT:    v_writelane_b32 v40, s62, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s63, 31
+; GISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GISEL-NEXT:  .LBB8_1: ; =>This Inner Loop Header: Depth=1
-; GISEL-NEXT:    v_readfirstlane_b32 s8, v1
-; GISEL-NEXT:    v_readfirstlane_b32 s9, v2
-; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[1:2]
-; GISEL-NEXT:    s_and_saveexec_b64 s[10:11], vcc
-; GISEL-NEXT:    s_swappc_b64 s[30:31], s[8:9]
+; GISEL-NEXT:    v_readfirstlane_b32 s6, v1
+; GISEL-NEXT:    v_readfirstlane_b32 s7, v2
+; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[1:2]
+; GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GISEL-NEXT:    s_swappc_b64 s[30:31], s[6:7]
 ; GISEL-NEXT:    v_mov_b32_e32 v3, v0
 ; GISEL-NEXT:    ; implicit-def: $vgpr1_vgpr2
 ; GISEL-NEXT:    ; implicit-def: $vgpr0
-; GISEL-NEXT:    s_xor_b64 exec, exec, s[10:11]
+; GISEL-NEXT:    s_xor_b64 exec, exec, s[8:9]
 ; GISEL-NEXT:    s_cbranch_execnz .LBB8_1
 ; GISEL-NEXT:  ; %bb.2:
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    v_mov_b32_e32 v0, v3
-; GISEL-NEXT:    v_readlane_b32 s63, v40, 29
-; GISEL-NEXT:    v_readlane_b32 s62, v40, 28
-; GISEL-NEXT:    v_readlane_b32 s61, v40, 27
-; GISEL-NEXT:    v_readlane_b32 s60, v40, 26
-; GISEL-NEXT:    v_readlane_b32 s59, v40, 25
-; GISEL-NEXT:    v_readlane_b32 s58, v40, 24
-; GISEL-NEXT:    v_readlane_b32 s57, v40, 23
-; GISEL-NEXT:    v_readlane_b32 s56, v40, 22
-; GISEL-NEXT:    v_readlane_b32 s55, v40, 21
-; GISEL-NEXT:    v_readlane_b32 s54, v40, 20
-; GISEL-NEXT:    v_readlane_b32 s53, v40, 19
-; GISEL-NEXT:    v_readlane_b32 s52, v40, 18
-; GISEL-NEXT:    v_readlane_b32 s51, v40, 17
-; GISEL-NEXT:    v_readlane_b32 s50, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s45, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    v_readlane_b32 s63, v40, 31
+; GISEL-NEXT:    v_readlane_b32 s62, v40, 30
+; GISEL-NEXT:    v_readlane_b32 s61, v40, 29
+; GISEL-NEXT:    v_readlane_b32 s60, v40, 28
+; GISEL-NEXT:    v_readlane_b32 s59, v40, 27
+; GISEL-NEXT:    v_readlane_b32 s58, v40, 26
+; GISEL-NEXT:    v_readlane_b32 s57, v40, 25
+; GISEL-NEXT:    v_readlane_b32 s56, v40, 24
+; GISEL-NEXT:    v_readlane_b32 s55, v40, 23
+; GISEL-NEXT:    v_readlane_b32 s54, v40, 22
+; GISEL-NEXT:    v_readlane_b32 s53, v40, 21
+; GISEL-NEXT:    v_readlane_b32 s52, v40, 20
+; GISEL-NEXT:    v_readlane_b32 s51, v40, 19
+; GISEL-NEXT:    v_readlane_b32 s50, v40, 18
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 17
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s45, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
-; GISEL-NEXT:    v_readlane_b32 s33, v40, 30
-; GISEL-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GISEL-NEXT:    v_readlane_b32 s33, v40, 32
+; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[4:5]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   %ret = call amdgpu_gfx i32 %fptr(i32 %i)
   ret i32 %ret
 }
@@ -1649,89 +1667,92 @@ define void @test_indirect_tail_call_vgpr_ptr(void()* %fptr) {
 ; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_mov_b64 exec, s[4:5]
-; GCN-NEXT:    v_writelane_b32 v40, s33, 30
+; GCN-NEXT:    v_writelane_b32 v40, s33, 32
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s45, 11
-; GCN-NEXT:    v_writelane_b32 v40, s46, 12
-; GCN-NEXT:    v_writelane_b32 v40, s47, 13
-; GCN-NEXT:    v_writelane_b32 v40, s48, 14
-; GCN-NEXT:    v_writelane_b32 v40, s49, 15
-; GCN-NEXT:    v_writelane_b32 v40, s50, 16
-; GCN-NEXT:    v_writelane_b32 v40, s51, 17
-; GCN-NEXT:    v_writelane_b32 v40, s52, 18
-; GCN-NEXT:    v_writelane_b32 v40, s53, 19
-; GCN-NEXT:    v_writelane_b32 v40, s54, 20
-; GCN-NEXT:    v_writelane_b32 v40, s55, 21
-; GCN-NEXT:    v_writelane_b32 v40, s56, 22
-; GCN-NEXT:    v_writelane_b32 v40, s57, 23
-; GCN-NEXT:    v_writelane_b32 v40, s58, 24
-; GCN-NEXT:    v_writelane_b32 v40, s59, 25
-; GCN-NEXT:    v_writelane_b32 v40, s60, 26
-; GCN-NEXT:    v_writelane_b32 v40, s61, 27
-; GCN-NEXT:    v_writelane_b32 v40, s62, 28
-; GCN-NEXT:    v_writelane_b32 v40, s63, 29
-; GCN-NEXT:    s_mov_b64 s[4:5], s[30:31]
-; GCN-NEXT:    s_mov_b64 s[6:7], exec
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s45, 13
+; GCN-NEXT:    v_writelane_b32 v40, s46, 14
+; GCN-NEXT:    v_writelane_b32 v40, s47, 15
+; GCN-NEXT:    v_writelane_b32 v40, s48, 16
+; GCN-NEXT:    v_writelane_b32 v40, s49, 17
+; GCN-NEXT:    v_writelane_b32 v40, s50, 18
+; GCN-NEXT:    v_writelane_b32 v40, s51, 19
+; GCN-NEXT:    v_writelane_b32 v40, s52, 20
+; GCN-NEXT:    v_writelane_b32 v40, s53, 21
+; GCN-NEXT:    v_writelane_b32 v40, s54, 22
+; GCN-NEXT:    v_writelane_b32 v40, s55, 23
+; GCN-NEXT:    v_writelane_b32 v40, s56, 24
+; GCN-NEXT:    v_writelane_b32 v40, s57, 25
+; GCN-NEXT:    v_writelane_b32 v40, s58, 26
+; GCN-NEXT:    v_writelane_b32 v40, s59, 27
+; GCN-NEXT:    v_writelane_b32 v40, s60, 28
+; GCN-NEXT:    v_writelane_b32 v40, s61, 29
+; GCN-NEXT:    v_writelane_b32 v40, s62, 30
+; GCN-NEXT:    v_writelane_b32 v40, s63, 31
+; GCN-NEXT:    s_mov_b64 s[4:5], exec
 ; GCN-NEXT:  .LBB9_1: ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT:    v_readfirstlane_b32 s10, v0
-; GCN-NEXT:    v_readfirstlane_b32 s11, v1
-; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[10:11], v[0:1]
-; GCN-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-NEXT:    s_swappc_b64 s[30:31], s[10:11]
+; GCN-NEXT:    v_readfirstlane_b32 s8, v0
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
+; GCN-NEXT:    s_and_saveexec_b64 s[6:7], vcc
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[8:9]
 ; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1
-; GCN-NEXT:    s_xor_b64 exec, exec, s[8:9]
+; GCN-NEXT:    s_xor_b64 exec, exec, s[6:7]
 ; GCN-NEXT:    s_cbranch_execnz .LBB9_1
 ; GCN-NEXT:  ; %bb.2:
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
-; GCN-NEXT:    v_readlane_b32 s63, v40, 29
-; GCN-NEXT:    v_readlane_b32 s62, v40, 28
-; GCN-NEXT:    v_readlane_b32 s61, v40, 27
-; GCN-NEXT:    v_readlane_b32 s60, v40, 26
-; GCN-NEXT:    v_readlane_b32 s59, v40, 25
-; GCN-NEXT:    v_readlane_b32 s58, v40, 24
-; GCN-NEXT:    v_readlane_b32 s57, v40, 23
-; GCN-NEXT:    v_readlane_b32 s56, v40, 22
-; GCN-NEXT:    v_readlane_b32 s55, v40, 21
-; GCN-NEXT:    v_readlane_b32 s54, v40, 20
-; GCN-NEXT:    v_readlane_b32 s53, v40, 19
-; GCN-NEXT:    v_readlane_b32 s52, v40, 18
-; GCN-NEXT:    v_readlane_b32 s51, v40, 17
-; GCN-NEXT:    v_readlane_b32 s50, v40, 16
-; GCN-NEXT:    v_readlane_b32 s49, v40, 15
-; GCN-NEXT:    v_readlane_b32 s48, v40, 14
-; GCN-NEXT:    v_readlane_b32 s47, v40, 13
-; GCN-NEXT:    v_readlane_b32 s46, v40, 12
-; GCN-NEXT:    v_readlane_b32 s45, v40, 11
-; GCN-NEXT:    v_readlane_b32 s44, v40, 10
-; GCN-NEXT:    v_readlane_b32 s43, v40, 9
-; GCN-NEXT:    v_readlane_b32 s42, v40, 8
-; GCN-NEXT:    v_readlane_b32 s41, v40, 7
-; GCN-NEXT:    v_readlane_b32 s40, v40, 6
-; GCN-NEXT:    v_readlane_b32 s39, v40, 5
-; GCN-NEXT:    v_readlane_b32 s38, v40, 4
-; GCN-NEXT:    v_readlane_b32 s37, v40, 3
-; GCN-NEXT:    v_readlane_b32 s36, v40, 2
-; GCN-NEXT:    v_readlane_b32 s35, v40, 1
-; GCN-NEXT:    v_readlane_b32 s34, v40, 0
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
+; GCN-NEXT:    v_readlane_b32 s63, v40, 31
+; GCN-NEXT:    v_readlane_b32 s62, v40, 30
+; GCN-NEXT:    v_readlane_b32 s61, v40, 29
+; GCN-NEXT:    v_readlane_b32 s60, v40, 28
+; GCN-NEXT:    v_readlane_b32 s59, v40, 27
+; GCN-NEXT:    v_readlane_b32 s58, v40, 26
+; GCN-NEXT:    v_readlane_b32 s57, v40, 25
+; GCN-NEXT:    v_readlane_b32 s56, v40, 24
+; GCN-NEXT:    v_readlane_b32 s55, v40, 23
+; GCN-NEXT:    v_readlane_b32 s54, v40, 22
+; GCN-NEXT:    v_readlane_b32 s53, v40, 21
+; GCN-NEXT:    v_readlane_b32 s52, v40, 20
+; GCN-NEXT:    v_readlane_b32 s51, v40, 19
+; GCN-NEXT:    v_readlane_b32 s50, v40, 18
+; GCN-NEXT:    v_readlane_b32 s49, v40, 17
+; GCN-NEXT:    v_readlane_b32 s48, v40, 16
+; GCN-NEXT:    v_readlane_b32 s47, v40, 15
+; GCN-NEXT:    v_readlane_b32 s46, v40, 14
+; GCN-NEXT:    v_readlane_b32 s45, v40, 13
+; GCN-NEXT:    v_readlane_b32 s44, v40, 12
+; GCN-NEXT:    v_readlane_b32 s43, v40, 11
+; GCN-NEXT:    v_readlane_b32 s42, v40, 10
+; GCN-NEXT:    v_readlane_b32 s41, v40, 9
+; GCN-NEXT:    v_readlane_b32 s40, v40, 8
+; GCN-NEXT:    v_readlane_b32 s39, v40, 7
+; GCN-NEXT:    v_readlane_b32 s38, v40, 6
+; GCN-NEXT:    v_readlane_b32 s37, v40, 5
+; GCN-NEXT:    v_readlane_b32 s36, v40, 4
+; GCN-NEXT:    v_readlane_b32 s35, v40, 3
+; GCN-NEXT:    v_readlane_b32 s34, v40, 2
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
-; GCN-NEXT:    v_readlane_b32 s33, v40, 30
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    v_readlane_b32 s33, v40, 32
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GISEL-LABEL: test_indirect_tail_call_vgpr_ptr:
 ; GISEL:       ; %bb.0:
@@ -1739,89 +1760,92 @@ define void @test_indirect_tail_call_vgpr_ptr(void()* %fptr) {
 ; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
-; GISEL-NEXT:    v_writelane_b32 v40, s33, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s33, 32
 ; GISEL-NEXT:    s_mov_b32 s33, s32
 ; GISEL-NEXT:    s_addk_i32 s32, 0x400
-; GISEL-NEXT:    v_writelane_b32 v40, s34, 0
-; GISEL-NEXT:    v_writelane_b32 v40, s35, 1
-; GISEL-NEXT:    v_writelane_b32 v40, s36, 2
-; GISEL-NEXT:    v_writelane_b32 v40, s37, 3
-; GISEL-NEXT:    v_writelane_b32 v40, s38, 4
-; GISEL-NEXT:    v_writelane_b32 v40, s39, 5
-; GISEL-NEXT:    v_writelane_b32 v40, s40, 6
-; GISEL-NEXT:    v_writelane_b32 v40, s41, 7
-; GISEL-NEXT:    v_writelane_b32 v40, s42, 8
-; GISEL-NEXT:    v_writelane_b32 v40, s43, 9
-; GISEL-NEXT:    v_writelane_b32 v40, s44, 10
-; GISEL-NEXT:    v_writelane_b32 v40, s45, 11
-; GISEL-NEXT:    v_writelane_b32 v40, s46, 12
-; GISEL-NEXT:    v_writelane_b32 v40, s47, 13
-; GISEL-NEXT:    v_writelane_b32 v40, s48, 14
-; GISEL-NEXT:    v_writelane_b32 v40, s49, 15
-; GISEL-NEXT:    v_writelane_b32 v40, s50, 16
-; GISEL-NEXT:    v_writelane_b32 v40, s51, 17
-; GISEL-NEXT:    v_writelane_b32 v40, s52, 18
-; GISEL-NEXT:    v_writelane_b32 v40, s53, 19
-; GISEL-NEXT:    v_writelane_b32 v40, s54, 20
-; GISEL-NEXT:    v_writelane_b32 v40, s55, 21
-; GISEL-NEXT:    v_writelane_b32 v40, s56, 22
-; GISEL-NEXT:    v_writelane_b32 v40, s57, 23
-; GISEL-NEXT:    v_writelane_b32 v40, s58, 24
-; GISEL-NEXT:    v_writelane_b32 v40, s59, 25
-; GISEL-NEXT:    v_writelane_b32 v40, s60, 26
-; GISEL-NEXT:    v_writelane_b32 v40, s61, 27
-; GISEL-NEXT:    v_writelane_b32 v40, s62, 28
-; GISEL-NEXT:    v_writelane_b32 v40, s63, 29
-; GISEL-NEXT:    s_mov_b64 s[4:5], s[30:31]
-; GISEL-NEXT:    s_mov_b64 s[6:7], exec
+; GISEL-NEXT:    v_writelane_b32 v40, s30, 0
+; GISEL-NEXT:    v_writelane_b32 v40, s31, 1
+; GISEL-NEXT:    v_writelane_b32 v40, s34, 2
+; GISEL-NEXT:    v_writelane_b32 v40, s35, 3
+; GISEL-NEXT:    v_writelane_b32 v40, s36, 4
+; GISEL-NEXT:    v_writelane_b32 v40, s37, 5
+; GISEL-NEXT:    v_writelane_b32 v40, s38, 6
+; GISEL-NEXT:    v_writelane_b32 v40, s39, 7
+; GISEL-NEXT:    v_writelane_b32 v40, s40, 8
+; GISEL-NEXT:    v_writelane_b32 v40, s41, 9
+; GISEL-NEXT:    v_writelane_b32 v40, s42, 10
+; GISEL-NEXT:    v_writelane_b32 v40, s43, 11
+; GISEL-NEXT:    v_writelane_b32 v40, s44, 12
+; GISEL-NEXT:    v_writelane_b32 v40, s45, 13
+; GISEL-NEXT:    v_writelane_b32 v40, s46, 14
+; GISEL-NEXT:    v_writelane_b32 v40, s47, 15
+; GISEL-NEXT:    v_writelane_b32 v40, s48, 16
+; GISEL-NEXT:    v_writelane_b32 v40, s49, 17
+; GISEL-NEXT:    v_writelane_b32 v40, s50, 18
+; GISEL-NEXT:    v_writelane_b32 v40, s51, 19
+; GISEL-NEXT:    v_writelane_b32 v40, s52, 20
+; GISEL-NEXT:    v_writelane_b32 v40, s53, 21
+; GISEL-NEXT:    v_writelane_b32 v40, s54, 22
+; GISEL-NEXT:    v_writelane_b32 v40, s55, 23
+; GISEL-NEXT:    v_writelane_b32 v40, s56, 24
+; GISEL-NEXT:    v_writelane_b32 v40, s57, 25
+; GISEL-NEXT:    v_writelane_b32 v40, s58, 26
+; GISEL-NEXT:    v_writelane_b32 v40, s59, 27
+; GISEL-NEXT:    v_writelane_b32 v40, s60, 28
+; GISEL-NEXT:    v_writelane_b32 v40, s61, 29
+; GISEL-NEXT:    v_writelane_b32 v40, s62, 30
+; GISEL-NEXT:    v_writelane_b32 v40, s63, 31
+; GISEL-NEXT:    s_mov_b64 s[4:5], exec
 ; GISEL-NEXT:  .LBB9_1: ; =>This Inner Loop Header: Depth=1
-; GISEL-NEXT:    v_readfirstlane_b32 s8, v0
-; GISEL-NEXT:    v_readfirstlane_b32 s9, v1
-; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[8:9], v[0:1]
-; GISEL-NEXT:    s_and_saveexec_b64 s[10:11], vcc
-; GISEL-NEXT:    s_swappc_b64 s[30:31], s[8:9]
+; GISEL-NEXT:    v_readfirstlane_b32 s6, v0
+; GISEL-NEXT:    v_readfirstlane_b32 s7, v1
+; GISEL-NEXT:    v_cmp_eq_u64_e32 vcc, s[6:7], v[0:1]
+; GISEL-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GISEL-NEXT:    s_swappc_b64 s[30:31], s[6:7]
 ; GISEL-NEXT:    ; implicit-def: $vgpr0_vgpr1
-; GISEL-NEXT:    s_xor_b64 exec, exec, s[10:11]
+; GISEL-NEXT:    s_xor_b64 exec, exec, s[8:9]
 ; GISEL-NEXT:    s_cbranch_execnz .LBB9_1
 ; GISEL-NEXT:  ; %bb.2:
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
-; GISEL-NEXT:    v_readlane_b32 s63, v40, 29
-; GISEL-NEXT:    v_readlane_b32 s62, v40, 28
-; GISEL-NEXT:    v_readlane_b32 s61, v40, 27
-; GISEL-NEXT:    v_readlane_b32 s60, v40, 26
-; GISEL-NEXT:    v_readlane_b32 s59, v40, 25
-; GISEL-NEXT:    v_readlane_b32 s58, v40, 24
-; GISEL-NEXT:    v_readlane_b32 s57, v40, 23
-; GISEL-NEXT:    v_readlane_b32 s56, v40, 22
-; GISEL-NEXT:    v_readlane_b32 s55, v40, 21
-; GISEL-NEXT:    v_readlane_b32 s54, v40, 20
-; GISEL-NEXT:    v_readlane_b32 s53, v40, 19
-; GISEL-NEXT:    v_readlane_b32 s52, v40, 18
-; GISEL-NEXT:    v_readlane_b32 s51, v40, 17
-; GISEL-NEXT:    v_readlane_b32 s50, v40, 16
-; GISEL-NEXT:    v_readlane_b32 s49, v40, 15
-; GISEL-NEXT:    v_readlane_b32 s48, v40, 14
-; GISEL-NEXT:    v_readlane_b32 s47, v40, 13
-; GISEL-NEXT:    v_readlane_b32 s46, v40, 12
-; GISEL-NEXT:    v_readlane_b32 s45, v40, 11
-; GISEL-NEXT:    v_readlane_b32 s44, v40, 10
-; GISEL-NEXT:    v_readlane_b32 s43, v40, 9
-; GISEL-NEXT:    v_readlane_b32 s42, v40, 8
-; GISEL-NEXT:    v_readlane_b32 s41, v40, 7
-; GISEL-NEXT:    v_readlane_b32 s40, v40, 6
-; GISEL-NEXT:    v_readlane_b32 s39, v40, 5
-; GISEL-NEXT:    v_readlane_b32 s38, v40, 4
-; GISEL-NEXT:    v_readlane_b32 s37, v40, 3
-; GISEL-NEXT:    v_readlane_b32 s36, v40, 2
-; GISEL-NEXT:    v_readlane_b32 s35, v40, 1
-; GISEL-NEXT:    v_readlane_b32 s34, v40, 0
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
+; GISEL-NEXT:    v_readlane_b32 s63, v40, 31
+; GISEL-NEXT:    v_readlane_b32 s62, v40, 30
+; GISEL-NEXT:    v_readlane_b32 s61, v40, 29
+; GISEL-NEXT:    v_readlane_b32 s60, v40, 28
+; GISEL-NEXT:    v_readlane_b32 s59, v40, 27
+; GISEL-NEXT:    v_readlane_b32 s58, v40, 26
+; GISEL-NEXT:    v_readlane_b32 s57, v40, 25
+; GISEL-NEXT:    v_readlane_b32 s56, v40, 24
+; GISEL-NEXT:    v_readlane_b32 s55, v40, 23
+; GISEL-NEXT:    v_readlane_b32 s54, v40, 22
+; GISEL-NEXT:    v_readlane_b32 s53, v40, 21
+; GISEL-NEXT:    v_readlane_b32 s52, v40, 20
+; GISEL-NEXT:    v_readlane_b32 s51, v40, 19
+; GISEL-NEXT:    v_readlane_b32 s50, v40, 18
+; GISEL-NEXT:    v_readlane_b32 s49, v40, 17
+; GISEL-NEXT:    v_readlane_b32 s48, v40, 16
+; GISEL-NEXT:    v_readlane_b32 s47, v40, 15
+; GISEL-NEXT:    v_readlane_b32 s46, v40, 14
+; GISEL-NEXT:    v_readlane_b32 s45, v40, 13
+; GISEL-NEXT:    v_readlane_b32 s44, v40, 12
+; GISEL-NEXT:    v_readlane_b32 s43, v40, 11
+; GISEL-NEXT:    v_readlane_b32 s42, v40, 10
+; GISEL-NEXT:    v_readlane_b32 s41, v40, 9
+; GISEL-NEXT:    v_readlane_b32 s40, v40, 8
+; GISEL-NEXT:    v_readlane_b32 s39, v40, 7
+; GISEL-NEXT:    v_readlane_b32 s38, v40, 6
+; GISEL-NEXT:    v_readlane_b32 s37, v40, 5
+; GISEL-NEXT:    v_readlane_b32 s36, v40, 4
+; GISEL-NEXT:    v_readlane_b32 s35, v40, 3
+; GISEL-NEXT:    v_readlane_b32 s34, v40, 2
+; GISEL-NEXT:    v_readlane_b32 s31, v40, 1
+; GISEL-NEXT:    v_readlane_b32 s30, v40, 0
 ; GISEL-NEXT:    s_addk_i32 s32, 0xfc00
-; GISEL-NEXT:    v_readlane_b32 s33, v40, 30
-; GISEL-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GISEL-NEXT:    v_readlane_b32 s33, v40, 32
+; GISEL-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GISEL-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GISEL-NEXT:    s_mov_b64 exec, s[6:7]
+; GISEL-NEXT:    s_mov_b64 exec, s[4:5]
 ; GISEL-NEXT:    s_waitcnt vmcnt(0)
-; GISEL-NEXT:    s_setpc_b64 s[4:5]
+; GISEL-NEXT:    s_setpc_b64 s[30:31]
   tail call amdgpu_gfx void %fptr()
   ret void
 }
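
Every hunk in these indirect-call tests follows the same shape. As a
sketch only (v40 is the CSR spill VGPR in these functions; lane indices
vary per test), the return-address handling changes from

;    s_mov_b64 s[4:5], s[30:31]     ; stash return address in a
;    ...                            ; call-clobbered pair
;    s_setpc_b64 s[4:5]

to

;    v_writelane_b32 v40, s30, 0    ; spill return address to CSR VGPR
;    v_writelane_b32 v40, s31, 1    ; lanes 0-1 in the prologue
;    ...
;    v_readlane_b32 s31, v40, 1     ; restore in the epilogue, then
;    v_readlane_b32 s30, v40, 0     ; return through s[30:31] itself
;    s_setpc_b64 s[30:31]

With lanes 0-1 taken by s30/s31, every other callee-saved lane shifts up
by two (s34 moves from lane 0 to lane 2, s33 from lane 30 to lane 32),
and with s[4:5] no longer holding the return address, each scratch SGPR
pair in the waterfall loops renumbers down by one pair.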

diff --git a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
index b303e69064a02..d3e18dbaaa6d9 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-asm.i128.ll
@@ -8,15 +8,15 @@
 define amdgpu_kernel void @s_input_output_i128() {
   ; GFX908-LABEL: name: s_input_output_i128
   ; GFX908: bb.0 (%ir-block.0):
-  ; GFX908-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5767178 /* regdef:SGPR_128 */, def %4
+  ; GFX908-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5242890 /* regdef:SGPR_128 */, def %4
   ; GFX908-NEXT:   [[COPY:%[0-9]+]]:sgpr_128 = COPY %4
-  ; GFX908-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5767177 /* reguse:SGPR_128 */, [[COPY]]
+  ; GFX908-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5242889 /* reguse:SGPR_128 */, [[COPY]]
   ; GFX908-NEXT:   S_ENDPGM 0
   ; GFX90A-LABEL: name: s_input_output_i128
   ; GFX90A: bb.0 (%ir-block.0):
-  ; GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5767178 /* regdef:SGPR_128 */, def %4
+  ; GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5242890 /* regdef:SGPR_128 */, def %4
   ; GFX90A-NEXT:   [[COPY:%[0-9]+]]:sgpr_128 = COPY %4
-  ; GFX90A-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5767177 /* reguse:SGPR_128 */, [[COPY]]
+  ; GFX90A-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5242889 /* reguse:SGPR_128 */, [[COPY]]
   ; GFX90A-NEXT:   S_ENDPGM 0
   %val = tail call i128 asm sideeffect "; def $0", "=s"()
   call void asm sideeffect "; use $0", "s"(i128 %val)
@@ -26,15 +26,15 @@ define amdgpu_kernel void @s_input_output_i128() {
 define amdgpu_kernel void @v_input_output_i128() {
   ; GFX908-LABEL: name: v_input_output_i128
   ; GFX908: bb.0 (%ir-block.0):
-  ; GFX908-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5308426 /* regdef:VReg_128 */, def %4
+  ; GFX908-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 4784138 /* regdef:VReg_128 */, def %4
   ; GFX908-NEXT:   [[COPY:%[0-9]+]]:vreg_128 = COPY %4
-  ; GFX908-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5308425 /* reguse:VReg_128 */, [[COPY]]
+  ; GFX908-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4784137 /* reguse:VReg_128 */, [[COPY]]
   ; GFX908-NEXT:   S_ENDPGM 0
   ; GFX90A-LABEL: name: v_input_output_i128
   ; GFX90A: bb.0 (%ir-block.0):
-  ; GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5505034 /* regdef:VReg_128_Align2 */, def %4
+  ; GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 4980746 /* regdef:VReg_128_Align2 */, def %4
   ; GFX90A-NEXT:   [[COPY:%[0-9]+]]:vreg_128_align2 = COPY %4
-  ; GFX90A-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5505033 /* reguse:VReg_128_Align2 */, [[COPY]]
+  ; GFX90A-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4980745 /* reguse:VReg_128_Align2 */, [[COPY]]
   ; GFX90A-NEXT:   S_ENDPGM 0
   %val = tail call i128 asm sideeffect "; def $0", "=v"()
   call void asm sideeffect "; use $0", "v"(i128 %val)
@@ -44,15 +44,15 @@ define amdgpu_kernel void @v_input_output_i128() {
 define amdgpu_kernel void @a_input_output_i128() {
   ; GFX908-LABEL: name: a_input_output_i128
   ; GFX908: bb.0 (%ir-block.0):
-  ; GFX908-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5242890 /* regdef:AReg_128 */, def %4
+  ; GFX908-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 4718602 /* regdef:AReg_128 */, def %4
   ; GFX908-NEXT:   [[COPY:%[0-9]+]]:areg_128 = COPY %4
-  ; GFX908-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5242889 /* reguse:AReg_128 */, [[COPY]]
+  ; GFX908-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4718601 /* reguse:AReg_128 */, [[COPY]]
   ; GFX908-NEXT:   S_ENDPGM 0
   ; GFX90A-LABEL: name: a_input_output_i128
   ; GFX90A: bb.0 (%ir-block.0):
-  ; GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 5439498 /* regdef:AReg_128_Align2 */, def %4
+  ; GFX90A-NEXT:   INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 4915210 /* regdef:AReg_128_Align2 */, def %4
   ; GFX90A-NEXT:   [[COPY:%[0-9]+]]:areg_128_align2 = COPY %4
-  ; GFX90A-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5439497 /* reguse:AReg_128_Align2 */, [[COPY]]
+  ; GFX90A-NEXT:   INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4915209 /* reguse:AReg_128_Align2 */, [[COPY]]
   ; GFX90A-NEXT:   S_ENDPGM 0
   %val = call i128 asm sideeffect "; def $0", "=a"()
   call void asm sideeffect "; use $0", "a"(i128 %val)
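
Only the immediate flag operands change in this file. An INLINEASM flag
packs the operand kind in its low bits and the register-class ID in its
upper 16 bits; for example:

;    5242890 = 0x50000A: kind = flag & 7 = 2 (regdef), RC ID = flag >> 16 = 80

Every constant here drops by exactly 524288 (8 << 16), e.g. 5767178 ->
5242890 for regdef:SGPR_128, i.e. the register-class IDs shifted down by
eight when the register lists were regenerated; the kinds and the
classes themselves are unchanged.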

diff --git a/llvm/test/CodeGen/AMDGPU/ipra-return-address-save-restore.ll b/llvm/test/CodeGen/AMDGPU/ipra-return-address-save-restore.ll
index 72dd90e951ec8..f34e41378b502 100644
--- a/llvm/test/CodeGen/AMDGPU/ipra-return-address-save-restore.ll
+++ b/llvm/test/CodeGen/AMDGPU/ipra-return-address-save-restore.ll
@@ -28,11 +28,14 @@ declare void @llvm.lifetime.end.p5i8(i64 immarg, i8 addrspace(5)* nocapture) #1
 ; Function Attrs: norecurse
 define internal fastcc void @svm_node_closure_bsdf(%struct.ShaderData addrspace(1)* %sd, float* %stack, <4 x i32> %node, i32* %offset, i32 %0, i8 %trunc, float %1, float %2, float %mul80, i1 %cmp412.old, <4 x i32> %3, float %4, i32 %5, i1 %cmp440, i1 %cmp442, i1 %or.cond1306, float %.op, %struct.ShaderClosure addrspace(1)* %arrayidx.i.i2202, %struct.ShaderClosure addrspace(1)* %retval.0.i.i22089, %struct.ShaderClosure addrspace(1)* %retval.1.i221310, i1 %cmp575, i32 addrspace(1)* %num_closure_left.i2215, i32 %6, i1 %cmp.i2216, i32 %7, i64 %idx.ext.i2223, i32 %sub5.i2221) #2 {
 ; GCN-LABEL: {{^}}svm_node_closure_bsdf:
-; GCN-NOT: s30,
-; GCN-NOT: s31,
+; GCN-DAG: v_writelane_b32 [[CSR_VGPR:v[0-9]+]], s30,
+; GCN-DAG: v_writelane_b32 [[CSR_VGPR]], s31,
+; GCN: s_movk_i32 s30, 0x60
+; GCN-NOT: s31
+; GCN-DAG: v_readlane_b32 s31, [[CSR_VGPR]],
+; GCN-DAG: v_readlane_b32 s30, [[CSR_VGPR]],
 ; GCN: s_waitcnt vmcnt(0)
 ; GCN: s_setpc_b64 s[30:31]
-; GCN: .size   svm_node_closure_bsdf
 entry:
   %8 = extractelement <4 x i32> %node, i64 0
   %cmp.i.not = icmp eq i32 undef, 0
@@ -167,10 +170,10 @@ sw.bb10:
 ; GCN-DAG: v_writelane_b32 [[CSR_VGPR:v[0-9]+]], s30,
 ; GCN-DAG: v_writelane_b32 [[CSR_VGPR]], s31,
 ; GCN: s_swappc_b64 s[30:31]
-; GCN-DAG: v_readlane_b32 s4, [[CSR_VGPR]],
-; GCN-DAG: v_readlane_b32 s5, [[CSR_VGPR]],
+; GCN-DAG: v_readlane_b32 s31, [[CSR_VGPR]],
+; GCN-DAG: v_readlane_b32 s30, [[CSR_VGPR]],
 ; GCN: s_waitcnt vmcnt(0)
-; GCN: s_setpc_b64 s[4:5]
+; GCN: s_setpc_b64 s[30:31]
   call fastcc void @svm_node_closure_bsdf(%struct.ShaderData addrspace(1)* null, float* null, <4 x i32> zeroinitializer, i32* null, i32 undef, i8 undef, float undef, float undef, float undef, i1 undef, <4 x i32> undef, float undef, i32 undef, i1 undef, i1 undef, i1 undef, float undef, %struct.ShaderClosure addrspace(1)* undef, %struct.ShaderClosure addrspace(1)* undef, %struct.ShaderClosure addrspace(1)* undef, i1 undef, i32 addrspace(1)* undef, i32 undef, i1 undef, i32 undef, i64 undef, i32 undef)
   ret void
 }
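
The rewritten checks are deliberately stronger than the old GCN-NOT
lines: they verify that s30/s31 are spilled via v_writelane_b32, that
s30 is then legitimately reused as scratch in the body (the s_movk_i32
s30, 0x60), and that both are reloaded via v_readlane_b32 before the
final s_setpc_b64 s[30:31].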

diff --git a/llvm/test/CodeGen/AMDGPU/ipra.ll b/llvm/test/CodeGen/AMDGPU/ipra.ll
index 31b70e0491333..1317a89603c28 100644
--- a/llvm/test/CodeGen/AMDGPU/ipra.ll
+++ b/llvm/test/CodeGen/AMDGPU/ipra.ll
@@ -40,20 +40,16 @@ define amdgpu_kernel void @kernel_call() #0 {
 }
 
 ; GCN-LABEL: {{^}}func_regular_call:
-; GCN-NOT: buffer_store
 ; GCN-NOT: buffer_load
 ; GCN-NOT: readlane
-; GCN-NOT: writelane
-; GCN: flat_load_dword v8
+; GCN: flat_load_dword v9
 ; GCN: s_swappc_b64
-; GCN-NOT: buffer_store
 ; GCN-NOT: buffer_load
 ; GCN-NOT: readlane
-; GCN-NOT: writelane
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v8
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v9
 
-; GCN: ; NumSgprs: 32
-; GCN: ; NumVgprs: 9
+; GCN: ; NumSgprs: 34
+; GCN: ; NumVgprs: 10
 define void @func_regular_call() #1 {
   %vgpr = load volatile i32, i32 addrspace(1)* undef
   tail call void @func()
@@ -76,13 +72,13 @@ define void @func_tail_call() #1 {
 }
 
 ; GCN-LABEL: {{^}}func_call_tail_call:
-; GCN: flat_load_dword v8
+; GCN: flat_load_dword v9
 ; GCN: s_swappc_b64
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v8
+; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, v9
 ; GCN: s_setpc_b64
 
-; GCN: ; NumSgprs: 32
-; GCN: ; NumVgprs: 9
+; GCN: ; NumSgprs: 34
+; GCN: ; NumVgprs: 10
 define void @func_call_tail_call() #1 {
   %vgpr = load volatile i32, i32 addrspace(1)* undef
   tail call void @func()
@@ -98,8 +94,11 @@ define void @void_func_void() noinline {
 
 ; Make sure we don't get save/restore of FP between calls.
 ; GCN-LABEL: {{^}}test_funcx2:
-; GCN-NOT: s5
+; GCN: s_getpc_b64
 ; GCN-NOT: s32
+; GCN: s_swappc_b64
+; GCN-NOT: s32
+; GCN: s_swappc_b64
 define void @test_funcx2() #0 {
   call void @void_func_void()
   call void @void_func_void()
@@ -121,10 +120,10 @@ bb:
 ; GCN-DAG: v_writelane_b32 [[CSR_VGPR:v[0-9]+]], s30,
 ; GCN-DAG: v_writelane_b32 [[CSR_VGPR]], s31,
 ; GCN: s_swappc_b64 s[30:31]
-; GCN-DAG: v_readlane_b32 s4, [[CSR_VGPR]],
-; GCN-DAG: v_readlane_b32 s5, [[CSR_VGPR]],
+; GCN-DAG: v_readlane_b32 s30, [[CSR_VGPR]],
+; GCN-DAG: v_readlane_b32 s31, [[CSR_VGPR]],
 ; GCN: s_waitcnt vmcnt(0)
-; GCN: s_setpc_b64 s[4:5]
+; GCN: s_setpc_b64 s[30:31]
   call void @eggs()
   ret void
 }
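
The count changes in ipra.ll follow from the new save/restore code: the
functions now report two more SGPRs (32 -> 34) and one more VGPR (9 ->
10) as used, which also nudges the scratch VGPR in the checks from v8 to
v9. In test_funcx2 the positive s_getpc_b64/s_swappc_b64 anchors with
GCN-NOT: s32 between them still verify that s32 is untouched between the
two calls.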

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll
index 9b6143d7a952b..581bb7233e505 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.implicitarg.ptr.ll
@@ -197,6 +197,8 @@ define amdgpu_kernel void @opencl_kernel_call_implicitarg_ptr_func([112 x i8]) #
 ; GCN-NOT: s8
 ; GCN-NOT: s9
 ; GCN-NOT: s[8:9]
+; GCN: s_swappc_b64
+; GCN: s_setpc_b64 s[30:31]
 define void @func_call_implicitarg_ptr_func() #0 {
   call void @func_implicitarg_ptr()
   ret void
@@ -206,6 +208,8 @@ define void @func_call_implicitarg_ptr_func() #0 {
 ; GCN-NOT: s8
 ; GCN-NOT: s9
 ; GCN-NOT: s[8:9]
+; GCN: s_swappc_b64
+; GCN: s_setpc_b64 s[30:31]
 define void @opencl_func_call_implicitarg_ptr_func() #0 {
   call void @func_implicitarg_ptr()
   ret void
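
The added positive checks matter for FileCheck semantics: a GCN-NOT run
is only enforced between the surrounding positive matches, so anchoring
on s_swappc_b64 and s_setpc_b64 s[30:31] bounds the s8/s9 non-appearance
checks to the code leading up to the call instead of letting them scan
to the end of the function.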

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.powi.ll b/llvm/test/CodeGen/AMDGPU/llvm.powi.ll
index 7838d5ab8defd..a95df62680343 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.powi.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.powi.ll
@@ -57,9 +57,9 @@ define float @v_powi_neg1_f32(float %l) {
 ; GFX7-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX7-NEXT:    v_div_scale_f32 v1, s[4:5], v0, v0, 1.0
 ; GFX7-NEXT:    v_rcp_f32_e32 v2, v1
-; GFX7-NEXT:    v_fma_f32 v3, -v1, v2, 1.0
-; GFX7-NEXT:    v_fma_f32 v2, v3, v2, v2
 ; GFX7-NEXT:    v_div_scale_f32 v3, vcc, 1.0, v0, 1.0
+; GFX7-NEXT:    v_fma_f32 v4, -v1, v2, 1.0
+; GFX7-NEXT:    v_fma_f32 v2, v4, v2, v2
 ; GFX7-NEXT:    v_mul_f32_e32 v4, v3, v2
 ; GFX7-NEXT:    v_fma_f32 v5, -v1, v4, v3
 ; GFX7-NEXT:    v_fma_f32 v4, v5, v2, v4
@@ -104,9 +104,9 @@ define float @v_powi_neg2_f32(float %l) {
 ; GFX7-NEXT:    v_mul_f32_e32 v0, v0, v0
 ; GFX7-NEXT:    v_div_scale_f32 v1, s[4:5], v0, v0, 1.0
 ; GFX7-NEXT:    v_rcp_f32_e32 v2, v1
-; GFX7-NEXT:    v_fma_f32 v3, -v1, v2, 1.0
-; GFX7-NEXT:    v_fma_f32 v2, v3, v2, v2
 ; GFX7-NEXT:    v_div_scale_f32 v3, vcc, 1.0, v0, 1.0
+; GFX7-NEXT:    v_fma_f32 v4, -v1, v2, 1.0
+; GFX7-NEXT:    v_fma_f32 v2, v4, v2, v2
 ; GFX7-NEXT:    v_mul_f32_e32 v4, v3, v2
 ; GFX7-NEXT:    v_fma_f32 v5, -v1, v4, v3
 ; GFX7-NEXT:    v_fma_f32 v4, v5, v2, v4
@@ -200,9 +200,9 @@ define float @v_powi_neg128_f32(float %l) {
 ; GFX7-NEXT:    v_mul_f32_e32 v0, v0, v0
 ; GFX7-NEXT:    v_div_scale_f32 v1, s[4:5], v0, v0, 1.0
 ; GFX7-NEXT:    v_rcp_f32_e32 v2, v1
-; GFX7-NEXT:    v_fma_f32 v3, -v1, v2, 1.0
-; GFX7-NEXT:    v_fma_f32 v2, v3, v2, v2
 ; GFX7-NEXT:    v_div_scale_f32 v3, vcc, 1.0, v0, 1.0
+; GFX7-NEXT:    v_fma_f32 v4, -v1, v2, 1.0
+; GFX7-NEXT:    v_fma_f32 v2, v4, v2, v2
 ; GFX7-NEXT:    v_mul_f32_e32 v4, v3, v2
 ; GFX7-NEXT:    v_fma_f32 v5, -v1, v4, v3
 ; GFX7-NEXT:    v_fma_f32 v4, v5, v2, v4
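
The llvm.powi.ll hunks look like incidental scheduling churn from the
register changes rather than anything ABI-visible: v_div_scale_f32 now
issues ahead of the two v_fma_f32s and the temporary renames from v3 to
v4, but the computed sequence is equivalent.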

diff --git a/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll b/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
index 2fac70d96bc31..7ae67bad68aea 100644
--- a/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul24-pass-ordering.ll
@@ -193,22 +193,22 @@ define void @slsr1_1(i32 %b.arg, i32 %s.arg) #0 {
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 4
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX9-NEXT:    s_mov_b32 s33, s32
 ; GFX9-NEXT:    s_addk_i32 s32, 0x800
-; GFX9-NEXT:    v_writelane_b32 v40, s34, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    v_writelane_b32 v40, s34, 2
 ; GFX9-NEXT:    s_getpc_b64 s[4:5]
 ; GFX9-NEXT:    s_add_u32 s4, s4, foo@gotpcrel32@lo+4
 ; GFX9-NEXT:    s_addc_u32 s5, s5, foo@gotpcrel32@hi+12
-; GFX9-NEXT:    v_writelane_b32 v40, s35, 1
+; GFX9-NEXT:    v_writelane_b32 v40, s35, 3
 ; GFX9-NEXT:    s_load_dwordx2 s[34:35], s[4:5], 0x0
 ; GFX9-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GFX9-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GFX9-NEXT:    buffer_store_dword v43, off, s[0:3], s33 ; 4-byte Folded Spill
 ; GFX9-NEXT:    v_mov_b32_e32 v41, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v42, v0
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 2
 ; GFX9-NEXT:    v_mul_u32_u24_e32 v0, v42, v41
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 3
 ; GFX9-NEXT:    v_and_b32_e32 v43, 0xffffff, v41
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_swappc_b64 s[30:31], s[34:35]
@@ -220,17 +220,17 @@ define void @slsr1_1(i32 %b.arg, i32 %s.arg) #0 {
 ; GFX9-NEXT:    buffer_load_dword v43, off, s[0:3], s33 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
-; GFX9-NEXT:    v_readlane_b32 s4, v40, 2
-; GFX9-NEXT:    v_readlane_b32 s5, v40, 3
-; GFX9-NEXT:    v_readlane_b32 s35, v40, 1
-; GFX9-NEXT:    v_readlane_b32 s34, v40, 0
+; GFX9-NEXT:    v_readlane_b32 s35, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s34, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xf800
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 4
-; GFX9-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GFX9-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
-; GFX9-NEXT:    s_mov_b64 exec, s[6:7]
+; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    s_setpc_b64 s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
   %b = and i32 %b.arg, 16777215
   %s = and i32 %s.arg, 16777215
 

diff --git a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
index 68646ad229a7c..109163bf73737 100644
--- a/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
+++ b/llvm/test/CodeGen/AMDGPU/need-fp-from-vgpr-spills.ll
@@ -44,11 +44,11 @@ define internal fastcc void @csr_vgpr_spill_fp_callee() #0 {
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
-; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
 ; CHECK-NEXT:    ;;#ASMSTART
 ; CHECK-NEXT:    ; clobber csr v40
 ; CHECK-NEXT:    ;;#ASMEND
+; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
+; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
 ; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s33 ; 4-byte Folded Reload
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
 ; CHECK-NEXT:    v_readlane_b32 s33, v1, 2
@@ -168,8 +168,8 @@ define hidden i32 @caller_save_vgpr_spill_fp_tail_call() #0 {
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
 ; CHECK-NEXT:    v_readlane_b32 s31, v1, 1
+; CHECK-NEXT:    v_readlane_b32 s30, v1, 0
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
 ; CHECK-NEXT:    v_readlane_b32 s33, v1, 2
 ; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
@@ -202,8 +202,8 @@ define hidden i32 @caller_save_vgpr_spill_fp() #0 {
 ; CHECK-NEXT:    s_mov_b64 s[0:1], s[8:9]
 ; CHECK-NEXT:    s_mov_b64 s[2:3], s[10:11]
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[4:5]
-; CHECK-NEXT:    v_readlane_b32 s30, v2, 0
 ; CHECK-NEXT:    v_readlane_b32 s31, v2, 1
+; CHECK-NEXT:    v_readlane_b32 s30, v2, 0
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
 ; CHECK-NEXT:    v_readlane_b32 s33, v2, 2
 ; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1

diff --git a/llvm/test/CodeGen/AMDGPU/nested-calls.ll b/llvm/test/CodeGen/AMDGPU/nested-calls.ll
index 327136fdfe1ed..2ae8530e1ed19 100644
--- a/llvm/test/CodeGen/AMDGPU/nested-calls.ll
+++ b/llvm/test/CodeGen/AMDGPU/nested-calls.ll
@@ -22,8 +22,8 @@ declare void @external_void_func_i32(i32) #0
 
 ; GCN: s_swappc_b64
 
-; GCN: v_readlane_b32 s4, v40, 0
-; GCN: v_readlane_b32 s5, v40, 1
+; GCN: v_readlane_b32 s31, v40, 1
+; GCN: v_readlane_b32 s30, v40, 0
 
 ; GCN-NEXT: s_addk_i32 s32, 0xfc00
 ; GCN-NEXT: v_readlane_b32 s33, v40, 2
@@ -31,7 +31,7 @@ declare void @external_void_func_i32(i32) #0
 ; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
 ; GCN-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
 ; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: s_setpc_b64 s[4:5]
+; GCN-NEXT: s_setpc_b64 s[30:31]
 define void @test_func_call_external_void_func_i32_imm() #0 {
   call void @external_void_func_i32(i32 42)
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/no-remat-indirect-mov.mir b/llvm/test/CodeGen/AMDGPU/no-remat-indirect-mov.mir
index 4f4bdcd6f5b41..d5abbc0172604 100644
--- a/llvm/test/CodeGen/AMDGPU/no-remat-indirect-mov.mir
+++ b/llvm/test/CodeGen/AMDGPU/no-remat-indirect-mov.mir
@@ -30,8 +30,7 @@ body:             |
   ; GFX9-LABEL: name: index_vgpr_waterfall_loop
   ; GFX9: bb.0:
   ; GFX9:   successors: %bb.1(0x80000000)
-  ; GFX9:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16, $sgpr30_sgpr31
-  ; GFX9:   [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
+  ; GFX9:   liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7, $vgpr8, $vgpr9, $vgpr10, $vgpr11, $vgpr12, $vgpr13, $vgpr14, $vgpr15, $vgpr16
   ; GFX9:   [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr16
   ; GFX9:   undef %18.sub15:vreg_512 = COPY $vgpr15
   ; GFX9:   %18.sub14:vreg_512 = COPY $vgpr14
@@ -63,7 +62,6 @@ body:             |
   ; GFX9:   S_CBRANCH_EXECNZ %bb.1, implicit $exec
   ; GFX9: bb.2:
   ; GFX9:   $exec = S_MOV_B64 [[S_MOV_B64_]]
-  ; GFX9:   $sgpr30_sgpr31 = COPY [[COPY]]
   ; GFX9:   $vgpr0 = COPY [[V_MOV_B32_e32_]]
   ; GFX9:   S_SETPC_B64_return $sgpr30_sgpr31, implicit $vgpr0, implicit undef $vgpr1, implicit undef $vgpr2, implicit undef $vgpr3
   bb.0:

diff --git a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
index 84643f945ac5f..1e424ecde23d5 100644
--- a/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-source-locations-in-prologue.ll
@@ -19,10 +19,10 @@ define hidden void @_ZL3barv() #0 !dbg !1644 {
 ; CHECK-NEXT:    v_writelane_b32 v40, s33, 2
 ; CHECK-NEXT:    s_mov_b32 s33, s32
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0x400
-; CHECK-NEXT:    v_writelane_b32 v40, s30, 0
-; CHECK-NEXT:    v_writelane_b32 v40, s31, 1
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    .loc 0 31 3 prologue_end ; lane-info.cpp:31:3
+; CHECK-NEXT:    v_writelane_b32 v40, s30, 0
+; CHECK-NEXT:    v_writelane_b32 v40, s31, 1
 ; CHECK-NEXT:    s_getpc_b64 s[16:17]
 ; CHECK-NEXT:    s_add_u32 s16, s16, _ZL13sleep_foreverv@gotpcrel32@lo+4
 ; CHECK-NEXT:    s_addc_u32 s17, s17, _ZL13sleep_foreverv@gotpcrel32@hi+12
@@ -34,14 +34,14 @@ define hidden void @_ZL3barv() #0 !dbg !1644 {
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[16:17]
 ; CHECK-NEXT:  .Ltmp1:
-; CHECK-NEXT:    v_readlane_b32 s30, v40, 0
+; CHECK-NEXT:    .loc 0 32 1 ; lane-info.cpp:32:1
 ; CHECK-NEXT:    v_readlane_b32 s31, v40, 1
+; CHECK-NEXT:    v_readlane_b32 s30, v40, 0
 ; CHECK-NEXT:    s_add_i32 s32, s32, 0xfffffc00
 ; CHECK-NEXT:    v_readlane_b32 s33, v40, 2
 ; CHECK-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; CHECK-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
 ; CHECK-NEXT:    s_mov_b64 exec, s[4:5]
-; CHECK-NEXT:    .loc 0 32 1 ; lane-info.cpp:32:1
 ; CHECK-NEXT:    s_waitcnt vmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
 ; CHECK-NEXT:  .Ltmp2:

diff --git a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir
index 933b8e91c0a78..ffb198330bd38 100644
--- a/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir
+++ b/llvm/test/CodeGen/AMDGPU/regalloc-failure-overlapping-insert-assert.mir
@@ -36,7 +36,7 @@ registers:
   - { id: 10, class: vreg_96 }
   - { id: 11, class: vreg_96 }
   - { id: 12, class: sreg_64 }
-  - { id: 13, class: ccr_sgpr_64 }
+  - { id: 13, class: sgpr_64 }
   - { id: 14, class: vgpr_32 }
 machineFunctionInfo:
   scratchRSrcReg:  '$sgpr0_sgpr1_sgpr2_sgpr3'
@@ -44,19 +44,18 @@ machineFunctionInfo:
   stackPtrOffsetReg: '$sgpr32'
 body:             |
   bb.0 (%ir-block.0):
-    liveins: $sgpr30_sgpr31
 
     INLINEASM &"; def $0", 1 /* sideeffect attdialect */, 10 /* regdef */, implicit-def $agpr0
     %14:vgpr_32 = COPY killed $agpr0
-    INLINEASM &"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 16842762 /* regdef:VReg_512 */, def %7, 14155786 /* regdef:VReg_256 */, def %8, 5308426 /* regdef:VReg_128 */, def %9, 3866634 /* regdef:VReg_96 */, def %10, 3866634 /* regdef:VReg_96 */, def %11
+    INLINEASM &"; def $0 $1 $2 $3 $4", 1 /* sideeffect attdialect */, 11534346 /* regdef:VReg_512 */, def %7, 10158090 /* regdef:VReg_256 */, def %8, 4784138 /* regdef:VReg_128 */, def %9, 3670026 /* regdef:VReg_96 */, def %10, 3670026 /* regdef:VReg_96 */, def %11
     INLINEASM &"; clobber", 1 /* sideeffect attdialect */, 12 /* clobber */, implicit-def dead early-clobber $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15_agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31, 12 /* clobber */, implicit-def dead early-clobber $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 16842761 /* reguse:VReg_512 */, %7
-    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 14155785 /* reguse:VReg_256 */, %8
-    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 5308425 /* reguse:VReg_128 */, %9
-    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3866633 /* reguse:VReg_96 */, %10
-    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3866633 /* reguse:VReg_96 */, %11
+    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 11534345 /* reguse:VReg_512 */, %7
+    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 10158089 /* reguse:VReg_256 */, %8
+    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 4784137 /* reguse:VReg_128 */, %9
+    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3670025 /* reguse:VReg_96 */, %10
+    INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 3670025 /* reguse:VReg_96 */, %11
     $agpr1 = COPY %14
     INLINEASM &"; use $0", 1 /* sideeffect attdialect */, 9 /* reguse */, killed $agpr1
-    S_SETPC_B64_return killed renamable $sgpr30_sgpr31
+    SI_RETURN
 
 ...

diff --git a/llvm/test/CodeGen/AMDGPU/save-fp.ll b/llvm/test/CodeGen/AMDGPU/save-fp.ll
index 6bed1dbf901e9..ec56f41aa1a0a 100644
--- a/llvm/test/CodeGen/AMDGPU/save-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/save-fp.ll
@@ -11,16 +11,14 @@ bb:
 
 ; GCN-LABEL: {{^}}caller:
 
-; GFX900:     s_mov_b32 [[SAVED_FP:s[0-9]+]], s33
-; GFX900:     s_mov_b32 s33, s32
-; GFX908-NOT: s_mov_b32 s33, s32
+; GCN:     v_writelane_b32 v2, s33, 2
+; GCN:     s_mov_b32 s33, s32
 ; GFX900:     buffer_store_dword
-; GFX908-DAG: s_mov_b32 [[SAVED_FP:s[0-9]+]], s33
 ; GFX908-DAG: v_accvgpr_write_b32
 ; GCN:        s_swappc_b64
 ; GFX900:     buffer_load_dword
 ; GFX908:     v_accvgpr_read_b32
-; GCN:        s_mov_b32 s33, [[SAVED_FP]]
+; GCN:        v_readlane_b32 s33, v2, 2
 define i64 @caller() {
 bb:
   call void asm sideeffect "", "~{v40}" ()

diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
index a5bf6cea675b4..b7d45756f9c37 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spills-split-regalloc.ll
@@ -11,8 +11,8 @@ define void @child_function() #0 {
 ; GCN:  v_writelane_b32 v255, s30, 0
 ; GCN:  v_writelane_b32 v255, s31, 1
 ; GCN:  s_swappc_b64 s[30:31], s[4:5]
-; GCN:  v_readlane_b32 s30, v255, 0
 ; GCN:  v_readlane_b32 s31, v255, 1
+; GCN:  v_readlane_b32 s30, v255, 0
 ; GCN:  v_readlane_b32 s33, v255, 2
 ; GCN: ; NumVgprs: 256
 
@@ -57,8 +57,8 @@ define void @spill_sgpr_with_no_lower_vgpr_available() #0 {
 ; GCN:  v_writelane_b32 v254, s30, 0
 ; GCN:  v_writelane_b32 v254, s31, 1
 ; GCN:  s_swappc_b64 s[30:31], s[4:5]
-; GCN:  v_readlane_b32 s30, v254, 0
 ; GCN:  v_readlane_b32 s31, v254, 1
+; GCN:  v_readlane_b32 s30, v254, 0
 ; GCN:  v_readlane_b32 s33, v254, 2
 
 define void @spill_to_lowest_available_vgpr() #0 {
@@ -99,8 +99,8 @@ define void @spill_to_lowest_available_vgpr() #0 {
 ; GCN-LABEL: {{^}}spill_sgpr_with_sgpr_uses:
 ; GCN-NOT:  buffer_store_dword v255, off, s[0:3], s32
 ; GCN: ; def s4
-; GCN: v_writelane_b32 v254, s4, 2
-; GCN: v_readlane_b32 s4, v254, 2
+; GCN: v_writelane_b32 v254, s4, 0
+; GCN: v_readlane_b32 s4, v254, 0
 ; GCN: ; use s4
 
 define void @spill_sgpr_with_sgpr_uses() #0 {
@@ -283,12 +283,14 @@ define internal void @child_function_ipra() #0 {
 
 ; GCN-LABEL: {{^}}spill_sgpr_no_free_vgpr_ipra:
 ; GCN: v_writelane_b32 v0, s30, 0
-; GCN: v_writelane_b32 v0, s31, 1
+; GCN: buffer_store_dword v0, off
+; GCN: v_writelane_b32 v0, s31, 0
 ; GCN: buffer_store_dword v0, off
 ; GCN: swappc
 ; GCN: buffer_load_dword v0, off
+; GCN: v_readlane_b32 s31, v0, 0
+; GCN: buffer_load_dword v0, off
 ; GCN: v_readlane_b32 s30, v0, 0
-; GCN: v_readlane_b32 s31, v0, 1
 define void @spill_sgpr_no_free_vgpr_ipra() #0 {
   call void @child_function_ipra()
   ret void

diff --git a/llvm/test/CodeGen/AMDGPU/sibling-call.ll b/llvm/test/CodeGen/AMDGPU/sibling-call.ll
index fccfb8fe2f212..054d4ef3651e4 100644
--- a/llvm/test/CodeGen/AMDGPU/sibling-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/sibling-call.ll
@@ -201,18 +201,18 @@ entry:
 ; GCN: s_or_saveexec_b64 s{{\[[0-9]+:[0-9]+\]}}, -1
 ; GCN-NEXT: buffer_store_dword [[CSRV:v[0-9]+]], off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GCN-NEXT: s_mov_b64 exec
-; GCN: s_mov_b32 s33, s32
+; GCN: v_writelane_b32 [[CSRV]], s33, 2
 ; GCN-DAG: s_addk_i32 s32, 0x400
 
-; GCN-DAG: buffer_store_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
-; GCN-DAG: buffer_store_dword v42, off, s[0:3], s33 ; 4-byte Folded Spill
-; GCN-DAG: v_writelane_b32 [[CSRV]], s34, 0
-; GCN-DAG: v_writelane_b32 [[CSRV]], s35, 1
-
 ; GCN-DAG: s_getpc_b64 s[4:5]
 ; GCN-DAG: s_add_u32 s4, s4, i32_fastcc_i32_i32@gotpcrel32@lo+4
 ; GCN-DAG: s_addc_u32 s5, s5, i32_fastcc_i32_i32@gotpcrel32@hi+12
 
+; GCN-DAG: v_writelane_b32 [[CSRV]], s30, 0
+; GCN-DAG: buffer_store_dword v41, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
+; GCN-DAG: buffer_store_dword v42, off, s[0:3], s33 ; 4-byte Folded Spill
+; GCN-DAG: v_writelane_b32 [[CSRV]], s31, 1
+
 
 ; GCN: s_swappc_b64
 
@@ -223,8 +223,8 @@ entry:
 ; GCN-NEXT: s_add_u32 s4, s4, sibling_call_i32_fastcc_i32_i32@rel32@lo+4
 ; GCN-NEXT: s_addc_u32 s5, s5, sibling_call_i32_fastcc_i32_i32@rel32@hi+12
 
-; GCN-DAG: v_readlane_b32 s34, [[CSRV]], 0
-; GCN-DAG: v_readlane_b32 s35, [[CSRV]], 1
+; GCN-DAG: v_readlane_b32 s30, [[CSRV]], 0
+; GCN-DAG: v_readlane_b32 s31, [[CSRV]], 1
 
 ; GCN: s_addk_i32 s32, 0xfc00
 ; GCN-NEXT: v_readlane_b32 s33,

diff --git a/llvm/test/CodeGen/AMDGPU/splitkit-copy-bundle.mir b/llvm/test/CodeGen/AMDGPU/splitkit-copy-bundle.mir
index 23714f0323ef3..d7e368f4d6d9f 100644
--- a/llvm/test/CodeGen/AMDGPU/splitkit-copy-bundle.mir
+++ b/llvm/test/CodeGen/AMDGPU/splitkit-copy-bundle.mir
@@ -89,11 +89,11 @@ body:             |
   ; VR:   renamable $sgpr37 = S_MOV_B32 -1
   ; VR:   renamable $sgpr36 = S_MOV_B32 -1
   ; VR:   renamable $sgpr68 = S_MOV_B32 0
+  ; VR:   renamable $sgpr30_sgpr31 = IMPLICIT_DEF
   ; VR:   renamable $sgpr34_sgpr35 = IMPLICIT_DEF
-  ; VR:   renamable $sgpr66_sgpr67 = IMPLICIT_DEF
   ; VR: bb.1:
   ; VR:   successors: %bb.2(0x80000000)
-  ; VR:   liveins: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67:0x000000000000000F, $sgpr68_sgpr69_sgpr70_sgpr71_sgpr72_sgpr73_sgpr74_sgpr75_sgpr76_sgpr77_sgpr78_sgpr79_sgpr80_sgpr81_sgpr82_sgpr83_sgpr84_sgpr85_sgpr86_sgpr87_sgpr88_sgpr89_sgpr90_sgpr91_sgpr92_sgpr93_sgpr94_sgpr95_sgpr96_sgpr97_sgpr98_sgpr99:0x0000000000000003, $sgpr34_sgpr35, $sgpr66_sgpr67
+  ; VR:   liveins: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67:0x000000000000000F, $sgpr68_sgpr69_sgpr70_sgpr71_sgpr72_sgpr73_sgpr74_sgpr75_sgpr76_sgpr77_sgpr78_sgpr79_sgpr80_sgpr81_sgpr82_sgpr83_sgpr84_sgpr85_sgpr86_sgpr87_sgpr88_sgpr89_sgpr90_sgpr91_sgpr92_sgpr93_sgpr94_sgpr95_sgpr96_sgpr97_sgpr98_sgpr99:0x0000000000000003, $sgpr30_sgpr31, $sgpr34_sgpr35
   ; VR:   renamable $sgpr38 = COPY renamable $sgpr36
   ; VR:   renamable $sgpr39 = COPY renamable $sgpr37
   ; VR:   renamable $sgpr40 = COPY renamable $sgpr36
@@ -155,8 +155,8 @@ body:             |
   ; VR:   renamable $sgpr99 = COPY renamable $sgpr68
   ; VR: bb.2:
   ; VR:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
-  ; VR:   liveins: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67:0x000000000000000F, $sgpr68_sgpr69_sgpr70_sgpr71_sgpr72_sgpr73_sgpr74_sgpr75_sgpr76_sgpr77_sgpr78_sgpr79_sgpr80_sgpr81_sgpr82_sgpr83_sgpr84_sgpr85_sgpr86_sgpr87_sgpr88_sgpr89_sgpr90_sgpr91_sgpr92_sgpr93_sgpr94_sgpr95_sgpr96_sgpr97_sgpr98_sgpr99:0x0000000000000003, $sgpr34_sgpr35, $sgpr66_sgpr67
-  ; VR:   S_NOP 0, csr_amdgpu_highregs, implicit renamable $sgpr34_sgpr35, implicit renamable $sgpr66_sgpr67
+  ; VR:   liveins: $sgpr36_sgpr37_sgpr38_sgpr39_sgpr40_sgpr41_sgpr42_sgpr43_sgpr44_sgpr45_sgpr46_sgpr47_sgpr48_sgpr49_sgpr50_sgpr51_sgpr52_sgpr53_sgpr54_sgpr55_sgpr56_sgpr57_sgpr58_sgpr59_sgpr60_sgpr61_sgpr62_sgpr63_sgpr64_sgpr65_sgpr66_sgpr67:0x000000000000000F, $sgpr68_sgpr69_sgpr70_sgpr71_sgpr72_sgpr73_sgpr74_sgpr75_sgpr76_sgpr77_sgpr78_sgpr79_sgpr80_sgpr81_sgpr82_sgpr83_sgpr84_sgpr85_sgpr86_sgpr87_sgpr88_sgpr89_sgpr90_sgpr91_sgpr92_sgpr93_sgpr94_sgpr95_sgpr96_sgpr97_sgpr98_sgpr99:0x0000000000000003, $sgpr30_sgpr31, $sgpr34_sgpr35
+  ; VR:   S_NOP 0, csr_amdgpu_highregs, implicit renamable $sgpr30_sgpr31, implicit renamable $sgpr34_sgpr35
   ; VR:   S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
   ; VR:   S_BRANCH %bb.2
   bb.0:

diff --git a/llvm/test/CodeGen/AMDGPU/stack-realign.ll b/llvm/test/CodeGen/AMDGPU/stack-realign.ll
index c7031a8921504..37df92e9700a8 100644
--- a/llvm/test/CodeGen/AMDGPU/stack-realign.ll
+++ b/llvm/test/CodeGen/AMDGPU/stack-realign.ll
@@ -171,12 +171,15 @@ define void @func_call_align1024_bp_gets_vgpr_spill(<32 x i32> %a, i32 %b) #0 {
 ; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s32
 ; GCN: s_swappc_b64 s[30:31],
 
+; GCN: v_readlane_b32 s31, [[VGPR_REG]], 1
+; GCN: v_readlane_b32 s30, [[VGPR_REG]], 0
 ; GCN: s_add_i32 s32, s32, 0xfffd0000
 ; GCN-NEXT: v_readlane_b32 s33, [[VGPR_REG]], 2
 ; GCN-NEXT: v_readlane_b32 s34, [[VGPR_REG]], 3
-; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT: s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT: buffer_load_dword [[VGPR_REG]], off, s[0:3], s32 offset:1028 ; 4-byte Folded Reload
-; GCN-NEXT: s_mov_b64 exec, s[6:7]
+; GCN-NEXT: s_mov_b64 exec, s[4:5]
+; GCN: s_setpc_b64 s[30:31]
   %temp = alloca i32, align 1024, addrspace(5)
   store volatile i32 0, i32 addrspace(5)* %temp, align 1024
   call void @extern_func(<32 x i32> %a, i32 %b)

diff --git a/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll
index 884b56e7a5304..bece8aab1e3d2 100644
--- a/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll
@@ -186,13 +186,13 @@ define <4 x half> @v_constained_fadd_v4f16_fpexcept_strict(<4 x half> %x, <4 x h
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    v_add_f16_e32 v4, v0, v2
-; GFX10-NEXT:    v_mov_b32_e32 v5, 0xffff
+; GFX10-NEXT:    v_mov_b32_e32 v4, 0xffff
+; GFX10-NEXT:    v_add_f16_e32 v5, v0, v2
 ; GFX10-NEXT:    v_add_f16_e32 v6, v1, v3
 ; GFX10-NEXT:    v_add_f16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
 ; GFX10-NEXT:    v_add_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX10-NEXT:    v_and_b32_e32 v2, v5, v4
-; GFX10-NEXT:    v_and_b32_e32 v3, v5, v6
+; GFX10-NEXT:    v_and_b32_e32 v2, v4, v5
+; GFX10-NEXT:    v_and_b32_e32 v3, v4, v6
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, 16, v2
 ; GFX10-NEXT:    v_lshl_or_b32 v1, v1, 16, v3
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]

diff --git a/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll
index 34c5e908ac310..00aa96fea128a 100644
--- a/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/strict_fmul.f16.ll
@@ -186,13 +186,13 @@ define <4 x half> @v_constained_fmul_v4f16_fpexcept_strict(<4 x half> %x, <4 x h
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    v_mul_f16_e32 v4, v0, v2
-; GFX10-NEXT:    v_mov_b32_e32 v5, 0xffff
+; GFX10-NEXT:    v_mov_b32_e32 v4, 0xffff
+; GFX10-NEXT:    v_mul_f16_e32 v5, v0, v2
 ; GFX10-NEXT:    v_mul_f16_e32 v6, v1, v3
 ; GFX10-NEXT:    v_mul_f16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
 ; GFX10-NEXT:    v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX10-NEXT:    v_and_b32_e32 v2, v5, v4
-; GFX10-NEXT:    v_and_b32_e32 v3, v5, v6
+; GFX10-NEXT:    v_and_b32_e32 v2, v4, v5
+; GFX10-NEXT:    v_and_b32_e32 v3, v4, v6
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, 16, v2
 ; GFX10-NEXT:    v_lshl_or_b32 v1, v1, 16, v3
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]

diff --git a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
index 4cf75f68a7a1d..802c08fe64a26 100644
--- a/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
+++ b/llvm/test/CodeGen/AMDGPU/strict_fsub.f16.ll
@@ -206,13 +206,13 @@ define <4 x half> @v_constained_fsub_v4f16_fpexcept_strict(<4 x half> %x, <4 x h
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
-; GFX10-NEXT:    v_sub_f16_e32 v4, v0, v2
-; GFX10-NEXT:    v_mov_b32_e32 v5, 0xffff
+; GFX10-NEXT:    v_mov_b32_e32 v4, 0xffff
+; GFX10-NEXT:    v_sub_f16_e32 v5, v0, v2
 ; GFX10-NEXT:    v_sub_f16_e32 v6, v1, v3
 ; GFX10-NEXT:    v_sub_f16_sdwa v1, v1, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
 ; GFX10-NEXT:    v_sub_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; GFX10-NEXT:    v_and_b32_e32 v2, v5, v4
-; GFX10-NEXT:    v_and_b32_e32 v3, v5, v6
+; GFX10-NEXT:    v_and_b32_e32 v2, v4, v5
+; GFX10-NEXT:    v_and_b32_e32 v3, v4, v6
 ; GFX10-NEXT:    v_lshl_or_b32 v0, v0, 16, v2
 ; GFX10-NEXT:    v_lshl_or_b32 v1, v1, 16, v3
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]

diff --git a/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll b/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
index 4894eeca58253..fba11bf0124ef 100644
--- a/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
+++ b/llvm/test/CodeGen/AMDGPU/tail-call-amdgpu-gfx.ll
@@ -20,25 +20,28 @@ define amdgpu_gfx float @caller(float %arg0) {
 ; GCN-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GCN-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_mov_b64 exec, s[34:35]
-; GCN-NEXT:    v_writelane_b32 v1, s33, 1
+; GCN-NEXT:    v_writelane_b32 v1, s33, 3
+; GCN-NEXT:    v_writelane_b32 v1, s4, 0
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x400
-; GCN-NEXT:    v_writelane_b32 v1, s4, 0
+; GCN-NEXT:    v_writelane_b32 v1, s30, 1
 ; GCN-NEXT:    v_add_f32_e32 v0, 1.0, v0
 ; GCN-NEXT:    s_mov_b32 s4, 2.0
-; GCN-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GCN-NEXT:    s_getpc_b64 s[30:31]
-; GCN-NEXT:    s_add_u32 s30, s30, callee@rel32@lo+4
-; GCN-NEXT:    s_addc_u32 s31, s31, callee@rel32@hi+12
-; GCN-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GCN-NEXT:    v_writelane_b32 v1, s31, 2
+; GCN-NEXT:    s_getpc_b64 s[34:35]
+; GCN-NEXT:    s_add_u32 s34, s34, callee@rel32@lo+4
+; GCN-NEXT:    s_addc_u32 s35, s35, callee@rel32@hi+12
+; GCN-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GCN-NEXT:    v_readlane_b32 s31, v1, 2
+; GCN-NEXT:    v_readlane_b32 s30, v1, 1
 ; GCN-NEXT:    v_readlane_b32 s4, v1, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
-; GCN-NEXT:    v_readlane_b32 s33, v1, 1
-; GCN-NEXT:    s_or_saveexec_b64 s[30:31], -1
+; GCN-NEXT:    v_readlane_b32 s33, v1, 3
+; GCN-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[30:31]
+; GCN-NEXT:    s_mov_b64 exec, s[34:35]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[36:37]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
   %add = fadd float %arg0, 1.0
   %call = tail call amdgpu_gfx float @callee(float %add, float inreg 2.0)
   ret float %call

diff --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll
index 574c7671de936..2a3100979883e 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -2499,7 +2499,7 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
 ; SI-NEXT:    v_madak_f32 v2, 0, v2, 0x47c35000
 ; SI-NEXT:    v_rcp_f32_e32 v2, v2
 ; SI-NEXT:    s_mov_b32 s4, 0xfffe7960
-; SI-NEXT:    v_mov_b32_e32 v9, 0
+; SI-NEXT:    v_mov_b32_e32 v8, 0
 ; SI-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v2
 ; SI-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
 ; SI-NEXT:    v_trunc_f32_e32 v3, v3
@@ -2507,22 +2507,22 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
 ; SI-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; SI-NEXT:    v_cvt_u32_f32_e32 v3, v3
 ; SI-NEXT:    v_mul_hi_u32 v4, v2, s4
-; SI-NEXT:    v_mul_lo_u32 v5, v3, s4
-; SI-NEXT:    v_mul_lo_u32 v6, v2, s4
+; SI-NEXT:    v_mul_lo_u32 v6, v3, s4
+; SI-NEXT:    v_mul_lo_u32 v5, v2, s4
 ; SI-NEXT:    v_subrev_i32_e32 v4, vcc, v2, v4
-; SI-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; SI-NEXT:    v_mul_lo_u32 v5, v2, v4
-; SI-NEXT:    v_mul_hi_u32 v7, v2, v6
-; SI-NEXT:    v_mul_hi_u32 v8, v2, v4
+; SI-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; SI-NEXT:    v_mul_hi_u32 v7, v2, v5
+; SI-NEXT:    v_mul_lo_u32 v6, v2, v4
+; SI-NEXT:    v_mul_hi_u32 v9, v2, v4
 ; SI-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; SI-NEXT:    v_mul_lo_u32 v4, v3, v4
-; SI-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; SI-NEXT:    v_mul_lo_u32 v8, v3, v6
-; SI-NEXT:    v_mul_hi_u32 v6, v3, v6
-; SI-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
-; SI-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; SI-NEXT:    v_addc_u32_e32 v6, vcc, v10, v9, vcc
+; SI-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
+; SI-NEXT:    v_mul_lo_u32 v9, v3, v5
+; SI-NEXT:    v_mul_hi_u32 v5, v3, v5
+; SI-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
+; SI-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
+; SI-NEXT:    v_addc_u32_e32 v6, vcc, v10, v8, vcc
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
@@ -2535,16 +2535,16 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
 ; SI-NEXT:    v_mul_lo_u32 v5, v2, v4
 ; SI-NEXT:    v_mul_hi_u32 v7, v2, v6
-; SI-NEXT:    v_mul_hi_u32 v8, v2, v4
+; SI-NEXT:    v_mul_hi_u32 v9, v2, v4
 ; SI-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; SI-NEXT:    v_mul_lo_u32 v4, v3, v4
 ; SI-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; SI-NEXT:    v_mul_lo_u32 v8, v3, v6
+; SI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
+; SI-NEXT:    v_mul_lo_u32 v9, v3, v6
 ; SI-NEXT:    v_mul_hi_u32 v6, v3, v6
-; SI-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
+; SI-NEXT:    v_add_i32_e32 v5, vcc, v5, v9
 ; SI-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; SI-NEXT:    v_addc_u32_e32 v6, vcc, v10, v9, vcc
+; SI-NEXT:    v_addc_u32_e32 v6, vcc, v10, v8, vcc
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; SI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
@@ -2560,7 +2560,7 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
 ; SI-NEXT:    v_mul_hi_u32 v2, v1, v2
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
 ; SI-NEXT:    v_addc_u32_e32 v2, vcc, v5, v2, vcc
-; SI-NEXT:    v_addc_u32_e32 v4, vcc, v7, v9, vcc
+; SI-NEXT:    v_addc_u32_e32 v4, vcc, v7, v8, vcc
 ; SI-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
 ; SI-NEXT:    v_mul_lo_u32 v4, v3, s4
@@ -2609,15 +2609,15 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
 ; VI-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v6, s6, 0
 ; VI-NEXT:    v_mul_lo_u32 v4, v7, s6
 ; VI-NEXT:    v_subrev_u32_e32 v3, vcc, v6, v3
-; VI-NEXT:    v_add_u32_e32 v5, vcc, v4, v3
-; VI-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
-; VI-NEXT:    v_mul_hi_u32 v8, v6, v2
-; VI-NEXT:    v_add_u32_e32 v8, vcc, v8, v3
+; VI-NEXT:    v_add_u32_e32 v8, vcc, v4, v3
+; VI-NEXT:    v_mul_hi_u32 v5, v6, v2
+; VI-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v6, v8, 0
+; VI-NEXT:    v_add_u32_e32 v10, vcc, v5, v3
 ; VI-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v7, v2, 0
-; VI-NEXT:    v_addc_u32_e32 v10, vcc, 0, v4, vcc
-; VI-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v7, v5, 0
-; VI-NEXT:    v_add_u32_e32 v2, vcc, v8, v2
-; VI-NEXT:    v_addc_u32_e32 v2, vcc, v10, v3, vcc
+; VI-NEXT:    v_addc_u32_e32 v11, vcc, 0, v4, vcc
+; VI-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v7, v8, 0
+; VI-NEXT:    v_add_u32_e32 v2, vcc, v10, v2
+; VI-NEXT:    v_addc_u32_e32 v2, vcc, v11, v3, vcc
 ; VI-NEXT:    v_addc_u32_e32 v3, vcc, v5, v9, vcc
 ; VI-NEXT:    v_add_u32_e32 v2, vcc, v2, v4
 ; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
@@ -2697,15 +2697,15 @@ define i64 @v_test_udiv64_mulhi_fold(i64 %arg) {
 ; GCN-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v6, s6, 0
 ; GCN-NEXT:    v_mul_lo_u32 v4, v7, s6
 ; GCN-NEXT:    v_subrev_u32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_add_u32_e32 v5, vcc, v4, v3
-; GCN-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v6, v5, 0
-; GCN-NEXT:    v_mul_hi_u32 v8, v6, v2
-; GCN-NEXT:    v_add_u32_e32 v8, vcc, v8, v3
+; GCN-NEXT:    v_add_u32_e32 v8, vcc, v4, v3
+; GCN-NEXT:    v_mul_hi_u32 v5, v6, v2
+; GCN-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], v6, v8, 0
+; GCN-NEXT:    v_add_u32_e32 v10, vcc, v5, v3
 ; GCN-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], v7, v2, 0
-; GCN-NEXT:    v_addc_u32_e32 v10, vcc, 0, v4, vcc
-; GCN-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v7, v5, 0
-; GCN-NEXT:    v_add_u32_e32 v2, vcc, v8, v2
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v10, v3, vcc
+; GCN-NEXT:    v_addc_u32_e32 v11, vcc, 0, v4, vcc
+; GCN-NEXT:    v_mad_u64_u32 v[4:5], s[4:5], v7, v8, 0
+; GCN-NEXT:    v_add_u32_e32 v2, vcc, v10, v2
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v11, v3, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v9, vcc
 ; GCN-NEXT:    v_add_u32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index cbbb5ab70807d..c23aff6d25fd6 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -229,10 +229,10 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_mac_f32_e32 v4, 0xcf800000, v5
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v5, v5
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; GCN-NEXT:    v_mul_lo_u32 v9, v6, v5
-; GCN-NEXT:    v_mul_hi_u32 v8, v6, v4
+; GCN-NEXT:    v_mul_lo_u32 v8, v6, v5
+; GCN-NEXT:    v_mul_hi_u32 v9, v6, v4
 ; GCN-NEXT:    v_mul_lo_u32 v10, v7, v4
-; GCN-NEXT:    v_add_i32_e32 v8, vcc, v8, v9
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
 ; GCN-NEXT:    v_mul_lo_u32 v9, v6, v4
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, v8, v10
 ; GCN-NEXT:    v_mul_lo_u32 v10, v4, v8
@@ -1554,7 +1554,7 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_madak_f32 v2, 0, v2, 0x41c00000
 ; GCN-NEXT:    v_rcp_f32_e32 v2, v2
 ; GCN-NEXT:    s_movk_i32 s4, 0xffe8
-; GCN-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v2
 ; GCN-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v3, v3
@@ -1562,22 +1562,22 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
 ; GCN-NEXT:    v_mul_hi_u32 v4, v2, s4
-; GCN-NEXT:    v_mul_lo_u32 v5, v3, s4
-; GCN-NEXT:    v_mul_lo_u32 v6, v2, s4
+; GCN-NEXT:    v_mul_lo_u32 v6, v3, s4
+; GCN-NEXT:    v_mul_lo_u32 v5, v2, s4
 ; GCN-NEXT:    v_subrev_i32_e32 v4, vcc, v2, v4
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v2, v4
-; GCN-NEXT:    v_mul_hi_u32 v7, v2, v6
-; GCN-NEXT:    v_mul_hi_u32 v8, v2, v4
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; GCN-NEXT:    v_mul_hi_u32 v7, v2, v5
+; GCN-NEXT:    v_mul_lo_u32 v6, v2, v4
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v4
 ; GCN-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v9, vcc
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
+; GCN-NEXT:    v_mul_lo_u32 v9, v3, v5
+; GCN-NEXT:    v_mul_hi_u32 v5, v3, v5
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
@@ -1589,16 +1589,16 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
 ; GCN-NEXT:    v_mul_lo_u32 v5, v2, v4
 ; GCN-NEXT:    v_mul_hi_u32 v7, v2, v6
-; GCN-NEXT:    v_mul_hi_u32 v8, v2, v4
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v4
 ; GCN-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
+; GCN-NEXT:    v_mul_lo_u32 v9, v3, v6
 ; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v9
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v9, vcc
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
@@ -1614,7 +1614,7 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v2, v1, v2
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v2, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v9, vcc
+; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, 24

diff --git a/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll b/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll
index 2a72daf0365c0..f494922139d9a 100644
--- a/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll
+++ b/llvm/test/CodeGen/AMDGPU/unstructured-cfg-def-use-issue.ll
@@ -48,15 +48,15 @@ define hidden void @widget() {
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    flat_store_dword v[0:1], v2
 ; GCN-NEXT:  .LBB0_7: ; %UnifiedReturnBlock
-; GCN-NEXT:    v_readlane_b32 s4, v40, 0
-; GCN-NEXT:    v_readlane_b32 s5, v40, 1
+; GCN-NEXT:    v_readlane_b32 s31, v40, 1
+; GCN-NEXT:    v_readlane_b32 s30, v40, 0
 ; GCN-NEXT:    s_addk_i32 s32, 0xfc00
 ; GCN-NEXT:    v_readlane_b32 s33, v40, 2
-; GCN-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GCN-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GCN-NEXT:    buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
-; GCN-NEXT:    s_mov_b64 exec, s[6:7]
+; GCN-NEXT:    s_mov_b64 exec, s[4:5]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    s_setpc_b64 s[4:5]
+; GCN-NEXT:    s_setpc_b64 s[30:31]
 ; SI-OPT-LABEL: @widget(
 ; SI-OPT-NEXT:  bb:
 ; SI-OPT-NEXT:    [[TMP:%.*]] = load i32, i32 addrspace(1)* null, align 16
@@ -185,7 +185,7 @@ define hidden void @blam() {
 ; GCN-NEXT:    s_or_saveexec_b64 s[16:17], -1
 ; GCN-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GCN-NEXT:    s_mov_b64 exec, s[16:17]
-; GCN-NEXT:    v_writelane_b32 v40, s33, 15
+; GCN-NEXT:    v_writelane_b32 v40, s33, 17
 ; GCN-NEXT:    s_mov_b32 s33, s32
 ; GCN-NEXT:    s_addk_i32 s32, 0x800
 ; GCN-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
@@ -193,21 +193,23 @@ define hidden void @blam() {
 ; GCN-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GCN-NEXT:    buffer_store_dword v45, off, s[0:3], s33 ; 4-byte Folded Spill
-; GCN-NEXT:    v_writelane_b32 v40, s34, 0
-; GCN-NEXT:    v_writelane_b32 v40, s35, 1
-; GCN-NEXT:    v_writelane_b32 v40, s36, 2
-; GCN-NEXT:    v_writelane_b32 v40, s37, 3
-; GCN-NEXT:    v_writelane_b32 v40, s38, 4
-; GCN-NEXT:    v_writelane_b32 v40, s39, 5
-; GCN-NEXT:    v_writelane_b32 v40, s40, 6
-; GCN-NEXT:    v_writelane_b32 v40, s41, 7
-; GCN-NEXT:    v_writelane_b32 v40, s42, 8
-; GCN-NEXT:    v_writelane_b32 v40, s43, 9
-; GCN-NEXT:    v_writelane_b32 v40, s44, 10
-; GCN-NEXT:    v_writelane_b32 v40, s45, 11
-; GCN-NEXT:    v_writelane_b32 v40, s46, 12
-; GCN-NEXT:    v_writelane_b32 v40, s48, 13
-; GCN-NEXT:    v_writelane_b32 v40, s49, 14
+; GCN-NEXT:    v_writelane_b32 v40, s30, 0
+; GCN-NEXT:    v_writelane_b32 v40, s31, 1
+; GCN-NEXT:    v_writelane_b32 v40, s34, 2
+; GCN-NEXT:    v_writelane_b32 v40, s35, 3
+; GCN-NEXT:    v_writelane_b32 v40, s36, 4
+; GCN-NEXT:    v_writelane_b32 v40, s37, 5
+; GCN-NEXT:    v_writelane_b32 v40, s38, 6
+; GCN-NEXT:    v_writelane_b32 v40, s39, 7
+; GCN-NEXT:    v_writelane_b32 v40, s40, 8
+; GCN-NEXT:    v_writelane_b32 v40, s41, 9
+; GCN-NEXT:    v_writelane_b32 v40, s42, 10
+; GCN-NEXT:    v_writelane_b32 v40, s43, 11
+; GCN-NEXT:    v_writelane_b32 v40, s44, 12
+; GCN-NEXT:    v_writelane_b32 v40, s45, 13
+; GCN-NEXT:    v_writelane_b32 v40, s46, 14
+; GCN-NEXT:    v_writelane_b32 v40, s48, 15
+; GCN-NEXT:    v_writelane_b32 v40, s49, 16
 ; GCN-NEXT:    v_mov_b32_e32 v41, v31
 ; GCN-NEXT:    s_mov_b32 s44, s14
 ; GCN-NEXT:    s_mov_b32 s45, s13

diff --git a/llvm/test/CodeGen/AMDGPU/urem-seteq-illegal-types.ll b/llvm/test/CodeGen/AMDGPU/urem-seteq-illegal-types.ll
index 79723603f1c1a..ba3d120be9ba2 100644
--- a/llvm/test/CodeGen/AMDGPU/urem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem-seteq-illegal-types.ll
@@ -76,22 +76,21 @@ define <3 x i1> @test_urem_vec(<3 x i11> %X) nounwind {
 ; CHECK-NEXT:    s_mov_b32 s5, 0x8311eb33
 ; CHECK-NEXT:    s_mov_b32 s6, 0x20140c
 ; CHECK-NEXT:    s_mov_b32 s7, 0xb6db6db7
-; CHECK-NEXT:    s_mov_b32 s8, 0x49249249
-; CHECK-NEXT:    s_mov_b32 s9, 0x24924924
-; CHECK-NEXT:    s_mov_b32 s10, 0xaaaaaaab
-; CHECK-NEXT:    s_mov_b32 s11, 0x2aaaaaaa
+; CHECK-NEXT:    s_mov_b32 s8, 0x24924924
+; CHECK-NEXT:    s_mov_b32 s9, 0xaaaaaaab
+; CHECK-NEXT:    s_mov_b32 s10, 0x2aaaaaaa
 ; CHECK-NEXT:    v_and_b32_e32 v0, s4, v0
 ; CHECK-NEXT:    v_and_b32_e32 v1, s4, v1
 ; CHECK-NEXT:    v_and_b32_e32 v2, s4, v2
 ; CHECK-NEXT:    v_mul_lo_u32 v2, v2, s5
 ; CHECK-NEXT:    v_mul_lo_u32 v1, v1, s7
-; CHECK-NEXT:    v_mul_lo_u32 v0, v0, s10
+; CHECK-NEXT:    v_mul_lo_u32 v0, v0, s9
 ; CHECK-NEXT:    v_add_i32_e32 v2, vcc, 0xf9dc299a, v2
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, s8, v1
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, 0x49249249, v1
 ; CHECK-NEXT:    v_alignbit_b32 v0, v0, v0, 1
-; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc, s11, v0
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc, s10, v0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v1
+; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc, s8, v1
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
 ; CHECK-NEXT:    v_cmp_lt_u32_e32 vcc, s6, v2
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 85a751b428251..e609f57194c43 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -239,10 +239,10 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_mac_f32_e32 v4, 0xcf800000, v5
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v5, v5
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; GCN-NEXT:    v_mul_lo_u32 v9, v6, v5
-; GCN-NEXT:    v_mul_hi_u32 v8, v6, v4
+; GCN-NEXT:    v_mul_lo_u32 v8, v6, v5
+; GCN-NEXT:    v_mul_hi_u32 v9, v6, v4
 ; GCN-NEXT:    v_mul_lo_u32 v10, v7, v4
-; GCN-NEXT:    v_add_i32_e32 v8, vcc, v8, v9
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
 ; GCN-NEXT:    v_mul_lo_u32 v9, v6, v4
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, v8, v10
 ; GCN-NEXT:    v_mul_lo_u32 v10, v4, v8

diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll
index 0a99c4dd78923..cbd561224add3 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-tuple-allocation.ll
@@ -58,15 +58,15 @@ define <4 x float> @non_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 ; GFX9-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:4 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
-; GFX9-NEXT:    v_readlane_b32 s4, v40, 0
-; GFX9-NEXT:    v_readlane_b32 s5, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xf800
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 2
-; GFX9-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GFX9-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
-; GFX9-NEXT:    s_mov_b64 exec, s[6:7]
+; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    s_setpc_b64 s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: non_preserved_vgpr_tuple8:
 ; GFX10:       ; %bb.0: ; %main_body
@@ -123,16 +123,26 @@ define <4 x float> @non_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 ; GFX10-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:4
 ; GFX10-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:8
 ; GFX10-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:12
-; GFX10-NEXT:    v_readlane_b32 s4, v40, 0
-; GFX10-NEXT:    v_readlane_b32 s5, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 2
-; GFX10-NEXT:    s_or_saveexec_b32 s6, -1
+; GFX10-NEXT:    s_or_saveexec_b32 s4, -1
 ; GFX10-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_mov_b32 exec_lo, s6
+; GFX10-NEXT:    s_mov_b32 exec_lo, s4
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    s_setpc_b64 s[4:5]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+
+
+
+
+
+
+
+
+
+
 main_body:
   call void asm sideeffect "", "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7}"() #0
   call void asm sideeffect "", "~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15}"() #0
@@ -155,21 +165,23 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 ; GFX9-NEXT:    buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
 ; GFX9-NEXT:    v_writelane_b32 v40, s33, 10
-; GFX9-NEXT:    v_writelane_b32 v40, s36, 0
-; GFX9-NEXT:    v_writelane_b32 v40, s37, 1
-; GFX9-NEXT:    v_writelane_b32 v40, s38, 2
-; GFX9-NEXT:    v_writelane_b32 v40, s39, 3
-; GFX9-NEXT:    v_writelane_b32 v40, s40, 4
-; GFX9-NEXT:    v_writelane_b32 v40, s41, 5
+; GFX9-NEXT:    v_writelane_b32 v40, s30, 0
+; GFX9-NEXT:    v_writelane_b32 v40, s31, 1
+; GFX9-NEXT:    v_writelane_b32 v40, s36, 2
+; GFX9-NEXT:    v_writelane_b32 v40, s37, 3
+; GFX9-NEXT:    v_writelane_b32 v40, s38, 4
+; GFX9-NEXT:    v_writelane_b32 v40, s39, 5
+; GFX9-NEXT:    v_writelane_b32 v40, s40, 6
+; GFX9-NEXT:    v_writelane_b32 v40, s41, 7
 ; GFX9-NEXT:    s_mov_b32 s33, s32
-; GFX9-NEXT:    v_writelane_b32 v40, s42, 6
+; GFX9-NEXT:    v_writelane_b32 v40, s42, 8
 ; GFX9-NEXT:    s_mov_b32 s36, 0
 ; GFX9-NEXT:    buffer_store_dword v41, off, s[0:3], s33 offset:16 ; 4-byte Folded Spill
 ; GFX9-NEXT:    buffer_store_dword v42, off, s[0:3], s33 offset:12 ; 4-byte Folded Spill
 ; GFX9-NEXT:    buffer_store_dword v43, off, s[0:3], s33 offset:8 ; 4-byte Folded Spill
 ; GFX9-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GFX9-NEXT:    buffer_store_dword v45, off, s[0:3], s33 ; 4-byte Folded Spill
-; GFX9-NEXT:    v_writelane_b32 v40, s43, 7
+; GFX9-NEXT:    v_writelane_b32 v40, s43, 9
 ; GFX9-NEXT:    v_mov_b32_e32 v45, v16
 ; GFX9-NEXT:    v_mov_b32_e32 v44, v15
 ; GFX9-NEXT:    v_mov_b32_e32 v43, v14
@@ -188,8 +200,6 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 ; GFX9-NEXT:    s_add_u32 s4, s4, extern_func@gotpcrel32@lo+4
 ; GFX9-NEXT:    s_addc_u32 s5, s5, extern_func@gotpcrel32@hi+12
 ; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
-; GFX9-NEXT:    v_writelane_b32 v40, s30, 8
-; GFX9-NEXT:    v_writelane_b32 v40, s31, 9
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
@@ -201,23 +211,23 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 ; GFX9-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:8 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:12 ; 4-byte Folded Reload
 ; GFX9-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:16 ; 4-byte Folded Reload
-; GFX9-NEXT:    v_readlane_b32 s4, v40, 8
-; GFX9-NEXT:    v_readlane_b32 s5, v40, 9
-; GFX9-NEXT:    v_readlane_b32 s43, v40, 7
-; GFX9-NEXT:    v_readlane_b32 s42, v40, 6
-; GFX9-NEXT:    v_readlane_b32 s41, v40, 5
-; GFX9-NEXT:    v_readlane_b32 s40, v40, 4
-; GFX9-NEXT:    v_readlane_b32 s39, v40, 3
-; GFX9-NEXT:    v_readlane_b32 s38, v40, 2
-; GFX9-NEXT:    v_readlane_b32 s37, v40, 1
-; GFX9-NEXT:    v_readlane_b32 s36, v40, 0
+; GFX9-NEXT:    v_readlane_b32 s43, v40, 9
+; GFX9-NEXT:    v_readlane_b32 s42, v40, 8
+; GFX9-NEXT:    v_readlane_b32 s41, v40, 7
+; GFX9-NEXT:    v_readlane_b32 s40, v40, 6
+; GFX9-NEXT:    v_readlane_b32 s39, v40, 5
+; GFX9-NEXT:    v_readlane_b32 s38, v40, 4
+; GFX9-NEXT:    v_readlane_b32 s37, v40, 3
+; GFX9-NEXT:    v_readlane_b32 s36, v40, 2
+; GFX9-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX9-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX9-NEXT:    s_addk_i32 s32, 0xf800
 ; GFX9-NEXT:    v_readlane_b32 s33, v40, 10
-; GFX9-NEXT:    s_or_saveexec_b64 s[6:7], -1
+; GFX9-NEXT:    s_or_saveexec_b64 s[4:5], -1
 ; GFX9-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-NEXT:    s_mov_b64 exec, s[6:7]
+; GFX9-NEXT:    s_mov_b64 exec, s[4:5]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    s_setpc_b64 s[4:5]
+; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: call_preserved_vgpr_tuple8:
 ; GFX10:       ; %bb.0: ; %main_body
@@ -235,35 +245,35 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 ; GFX10-NEXT:    buffer_store_dword v44, off, s[0:3], s33 offset:4 ; 4-byte Folded Spill
 ; GFX10-NEXT:    buffer_store_dword v45, off, s[0:3], s33 ; 4-byte Folded Spill
 ; GFX10-NEXT:    s_addk_i32 s32, 0x400
-; GFX10-NEXT:    v_writelane_b32 v40, s36, 0
-; GFX10-NEXT:    s_mov_b32 s36, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s30, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v41, v16
 ; GFX10-NEXT:    v_mov_b32_e32 v42, v15
 ; GFX10-NEXT:    v_mov_b32_e32 v43, v14
-; GFX10-NEXT:    v_writelane_b32 v40, s37, 1
-; GFX10-NEXT:    s_mov_b32 s37, s36
 ; GFX10-NEXT:    v_mov_b32_e32 v44, v13
+; GFX10-NEXT:    v_writelane_b32 v40, s31, 1
 ; GFX10-NEXT:    v_mov_b32_e32 v45, v12
-; GFX10-NEXT:    v_writelane_b32 v40, s38, 2
+; GFX10-NEXT:    v_writelane_b32 v40, s36, 2
+; GFX10-NEXT:    s_mov_b32 s36, 0
+; GFX10-NEXT:    v_writelane_b32 v40, s37, 3
+; GFX10-NEXT:    s_mov_b32 s37, s36
+; GFX10-NEXT:    v_writelane_b32 v40, s38, 4
 ; GFX10-NEXT:    s_mov_b32 s38, s36
-; GFX10-NEXT:    v_writelane_b32 v40, s39, 3
+; GFX10-NEXT:    v_writelane_b32 v40, s39, 5
 ; GFX10-NEXT:    s_mov_b32 s39, s36
-; GFX10-NEXT:    v_writelane_b32 v40, s40, 4
+; GFX10-NEXT:    v_writelane_b32 v40, s40, 6
 ; GFX10-NEXT:    s_mov_b32 s40, s36
-; GFX10-NEXT:    v_writelane_b32 v40, s41, 5
+; GFX10-NEXT:    v_writelane_b32 v40, s41, 7
 ; GFX10-NEXT:    s_mov_b32 s41, s36
-; GFX10-NEXT:    v_writelane_b32 v40, s42, 6
+; GFX10-NEXT:    v_writelane_b32 v40, s42, 8
 ; GFX10-NEXT:    s_mov_b32 s42, s36
-; GFX10-NEXT:    v_writelane_b32 v40, s43, 7
+; GFX10-NEXT:    v_writelane_b32 v40, s43, 9
 ; GFX10-NEXT:    s_mov_b32 s43, s36
 ; GFX10-NEXT:    image_gather4_c_b_cl v[0:3], v[12:16], s[36:43], s[4:7] dmask:0x1 dim:SQ_RSRC_IMG_2D
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_getpc_b64 s[4:5]
 ; GFX10-NEXT:    s_add_u32 s4, s4, extern_func@gotpcrel32@lo+4
 ; GFX10-NEXT:    s_addc_u32 s5, s5, extern_func@gotpcrel32@hi+12
-; GFX10-NEXT:    v_writelane_b32 v40, s30, 8
 ; GFX10-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
-; GFX10-NEXT:    v_writelane_b32 v40, s31, 9
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
@@ -275,24 +285,31 @@ define <4 x float> @call_preserved_vgpr_tuple8(<8 x i32> %rsrc, <4 x i32> %samp,
 ; GFX10-NEXT:    buffer_load_dword v43, off, s[0:3], s33 offset:8
 ; GFX10-NEXT:    buffer_load_dword v42, off, s[0:3], s33 offset:12
 ; GFX10-NEXT:    buffer_load_dword v41, off, s[0:3], s33 offset:16
-; GFX10-NEXT:    v_readlane_b32 s4, v40, 8
-; GFX10-NEXT:    v_readlane_b32 s5, v40, 9
-; GFX10-NEXT:    v_readlane_b32 s43, v40, 7
-; GFX10-NEXT:    v_readlane_b32 s42, v40, 6
-; GFX10-NEXT:    v_readlane_b32 s41, v40, 5
-; GFX10-NEXT:    v_readlane_b32 s40, v40, 4
-; GFX10-NEXT:    v_readlane_b32 s39, v40, 3
-; GFX10-NEXT:    v_readlane_b32 s38, v40, 2
-; GFX10-NEXT:    v_readlane_b32 s37, v40, 1
-; GFX10-NEXT:    v_readlane_b32 s36, v40, 0
+; GFX10-NEXT:    v_readlane_b32 s43, v40, 9
+; GFX10-NEXT:    v_readlane_b32 s42, v40, 8
+; GFX10-NEXT:    v_readlane_b32 s41, v40, 7
+; GFX10-NEXT:    v_readlane_b32 s40, v40, 6
+; GFX10-NEXT:    v_readlane_b32 s39, v40, 5
+; GFX10-NEXT:    v_readlane_b32 s38, v40, 4
+; GFX10-NEXT:    v_readlane_b32 s37, v40, 3
+; GFX10-NEXT:    v_readlane_b32 s36, v40, 2
+; GFX10-NEXT:    v_readlane_b32 s31, v40, 1
+; GFX10-NEXT:    v_readlane_b32 s30, v40, 0
 ; GFX10-NEXT:    s_addk_i32 s32, 0xfc00
 ; GFX10-NEXT:    v_readlane_b32 s33, v40, 10
-; GFX10-NEXT:    s_or_saveexec_b32 s6, -1
+; GFX10-NEXT:    s_or_saveexec_b32 s4, -1
 ; GFX10-NEXT:    buffer_load_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_mov_b32 exec_lo, s6
+; GFX10-NEXT:    s_mov_b32 exec_lo, s4
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    s_setpc_b64 s[4:5]
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+
+
+
+
+
+
+
 main_body:
   %v = call <4 x float> @llvm.amdgcn.image.gather4.c.b.cl.2d.v4f32.f32.f32(i32 1, float %bias, float %zcompare, float %s, float %t, float %clamp, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
   store <4 x float> %v, <4 x float> addrspace(1)* undef

diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index ab70a39345edf..ec91016353a64 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -1134,8 +1134,8 @@ declare void @external_void_func_void() #1
 ; GCN-DAG: v_writelane_b32 v40, s30, 0
 ; GCN-DAG: v_writelane_b32 v40, s31, 1
 ; GCN: s_swappc_b64
-; GCN-DAG: v_readlane_b32 s4, v40, 0
-; GCN-DAG: v_readlane_b32 s5, v40, 1
+; GCN-DAG: v_readlane_b32 s30, v40, 0
+; GCN-DAG: v_readlane_b32 s31, v40, 1
 
 
 ; GFX1064: s_addk_i32 s32, 0xfc00

diff --git a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
index a39bed76cbfe0..b224242f86e06 100644
--- a/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/wwm-reserved-spill.ll
@@ -137,8 +137,6 @@ define amdgpu_gfx void @strict_wwm_cfg(<4 x i32> inreg %tmp14, i32 %arg) {
 ; GFX9-O0-NEXT:    buffer_store_dword v1, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    buffer_store_dword v2, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s30, 0
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s31, 1
 ; GFX9-O0-NEXT:    s_mov_b32 s36, s4
 ; GFX9-O0-NEXT:    ; kill: def $sgpr36 killed $sgpr36 def $sgpr36_sgpr37_sgpr38_sgpr39
 ; GFX9-O0-NEXT:    s_mov_b32 s37, s5
@@ -146,37 +144,37 @@ define amdgpu_gfx void @strict_wwm_cfg(<4 x i32> inreg %tmp14, i32 %arg) {
 ; GFX9-O0-NEXT:    s_mov_b32 s39, s7
 ; GFX9-O0-NEXT:    s_mov_b64 s[42:43], s[38:39]
 ; GFX9-O0-NEXT:    s_mov_b64 s[40:41], s[36:37]
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s40, 2
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s41, 3
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s42, 4
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s43, 5
-; GFX9-O0-NEXT:    s_mov_b32 s30, 0
-; GFX9-O0-NEXT:    buffer_load_dwordx2 v[3:4], off, s[36:39], s30
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s40, 0
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s41, 1
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s42, 2
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s43, 3
+; GFX9-O0-NEXT:    s_mov_b32 s34, 0
+; GFX9-O0-NEXT:    buffer_load_dwordx2 v[3:4], off, s[36:39], s34
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT:    buffer_store_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    ; implicit-def: $sgpr34_sgpr35
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr36_sgpr37
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
 ; GFX9-O0-NEXT:    s_not_b64 exec, exec
-; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s30
+; GFX9-O0-NEXT:    v_mov_b32_e32 v1, s34
 ; GFX9-O0-NEXT:    s_not_b64 exec, exec
-; GFX9-O0-NEXT:    s_or_saveexec_b64 s[34:35], -1
-; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s30
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[36:37], -1
+; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s34
 ; GFX9-O0-NEXT:    s_nop 1
 ; GFX9-O0-NEXT:    v_mov_b32_dpp v2, v1 row_bcast:31 row_mask:0xc bank_mask:0xf
 ; GFX9-O0-NEXT:    v_add_u32_e64 v1, v1, v2
-; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[36:37]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v1
 ; GFX9-O0-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    v_cmp_eq_u32_e64 s[34:35], v0, s30
-; GFX9-O0-NEXT:    v_mov_b32_e32 v0, s30
+; GFX9-O0-NEXT:    v_cmp_eq_u32_e64 s[36:37], v0, s34
+; GFX9-O0-NEXT:    v_mov_b32_e32 v0, s34
 ; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
-; GFX9-O0-NEXT:    s_mov_b64 s[30:31], exec
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s30, 6
-; GFX9-O0-NEXT:    v_writelane_b32 v5, s31, 7
-; GFX9-O0-NEXT:    s_and_b64 s[30:31], s[30:31], s[34:35]
-; GFX9-O0-NEXT:    s_mov_b64 exec, s[30:31]
+; GFX9-O0-NEXT:    s_mov_b64 s[34:35], exec
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s34, 4
+; GFX9-O0-NEXT:    v_writelane_b32 v5, s35, 5
+; GFX9-O0-NEXT:    s_and_b64 s[34:35], s[34:35], s[36:37]
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O0-NEXT:    s_cbranch_execz .LBB1_2
 ; GFX9-O0-NEXT:  ; %bb.1: ; %if
 ; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
@@ -184,29 +182,27 @@ define amdgpu_gfx void @strict_wwm_cfg(<4 x i32> inreg %tmp14, i32 %arg) {
 ; GFX9-O0-NEXT:    buffer_load_dword v4, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v4
-; GFX9-O0-NEXT:    s_or_saveexec_b64 s[30:31], -1
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-O0-NEXT:    s_mov_b64 exec, s[30:31]
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v0
 ; GFX9-O0-NEXT:    s_not_b64 exec, exec
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v1
 ; GFX9-O0-NEXT:    s_not_b64 exec, exec
-; GFX9-O0-NEXT:    s_or_saveexec_b64 s[30:31], -1
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-O0-NEXT:    v_mov_b32_dpp v1, v2 row_bcast:31 row_mask:0xc bank_mask:0xf
 ; GFX9-O0-NEXT:    v_add_u32_e64 v1, v2, v1
-; GFX9-O0-NEXT:    s_mov_b64 exec, s[30:31]
+; GFX9-O0-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v1
 ; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX9-O0-NEXT:  .LBB1_2: ; %merge
-; GFX9-O0-NEXT:    v_readlane_b32 s34, v5, 6
-; GFX9-O0-NEXT:    v_readlane_b32 s35, v5, 7
+; GFX9-O0-NEXT:    v_readlane_b32 s34, v5, 4
+; GFX9-O0-NEXT:    v_readlane_b32 s35, v5, 5
 ; GFX9-O0-NEXT:    s_or_b64 exec, exec, s[34:35]
-; GFX9-O0-NEXT:    v_readlane_b32 s30, v5, 0
-; GFX9-O0-NEXT:    v_readlane_b32 s31, v5, 1
-; GFX9-O0-NEXT:    v_readlane_b32 s36, v5, 2
-; GFX9-O0-NEXT:    v_readlane_b32 s37, v5, 3
-; GFX9-O0-NEXT:    v_readlane_b32 s38, v5, 4
-; GFX9-O0-NEXT:    v_readlane_b32 s39, v5, 5
+; GFX9-O0-NEXT:    v_readlane_b32 s36, v5, 0
+; GFX9-O0-NEXT:    v_readlane_b32 s37, v5, 1
+; GFX9-O0-NEXT:    v_readlane_b32 s38, v5, 2
+; GFX9-O0-NEXT:    v_readlane_b32 s39, v5, 3
 ; GFX9-O0-NEXT:    buffer_load_dword v0, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
 ; GFX9-O0-NEXT:    s_waitcnt vmcnt(0)
@@ -357,22 +353,22 @@ define amdgpu_gfx void @strict_wwm_call(<4 x i32> inreg %tmp14, i32 inreg %arg)
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v2, s34
 ; GFX9-O0-NEXT:    s_not_b64 exec, exec
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[40:41], -1
-; GFX9-O0-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-O0-NEXT:    s_add_u32 s30, s30, strict_wwm_called@rel32@lo+4
-; GFX9-O0-NEXT:    s_addc_u32 s31, s31, strict_wwm_called@rel32@hi+12
+; GFX9-O0-NEXT:    s_getpc_b64 s[42:43]
+; GFX9-O0-NEXT:    s_add_u32 s42, s42, strict_wwm_called@rel32@lo+4
+; GFX9-O0-NEXT:    s_addc_u32 s43, s43, strict_wwm_called@rel32@hi+12
 ; GFX9-O0-NEXT:    s_mov_b64 s[46:47], s[2:3]
 ; GFX9-O0-NEXT:    s_mov_b64 s[44:45], s[0:1]
 ; GFX9-O0-NEXT:    s_mov_b64 s[0:1], s[44:45]
 ; GFX9-O0-NEXT:    s_mov_b64 s[2:3], s[46:47]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
-; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[30:31]
-; GFX9-O0-NEXT:    v_readlane_b32 s30, v3, 0
-; GFX9-O0-NEXT:    v_readlane_b32 s31, v3, 1
+; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[42:43]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v0
 ; GFX9-O0-NEXT:    v_add_u32_e64 v1, v1, v2
 ; GFX9-O0-NEXT:    s_mov_b64 exec, s[40:41]
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v1
 ; GFX9-O0-NEXT:    buffer_store_dword v0, off, s[36:39], s34 offset:4
+; GFX9-O0-NEXT:    v_readlane_b32 s31, v3, 1
+; GFX9-O0-NEXT:    v_readlane_b32 s30, v3, 0
 ; GFX9-O0-NEXT:    s_add_i32 s32, s32, 0xfffffc00
 ; GFX9-O0-NEXT:    v_readlane_b32 s33, v3, 2
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -387,36 +383,41 @@ define amdgpu_gfx void @strict_wwm_call(<4 x i32> inreg %tmp14, i32 inreg %arg)
 ; GFX9-O3:       ; %bb.0:
 ; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-O3-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    buffer_store_dword v2, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    buffer_store_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-O3-NEXT:    s_mov_b32 s38, s33
+; GFX9-O3-NEXT:    v_writelane_b32 v3, s33, 2
+; GFX9-O3-NEXT:    v_writelane_b32 v3, s30, 0
 ; GFX9-O3-NEXT:    s_mov_b32 s33, s32
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0x400
-; GFX9-O3-NEXT:    s_mov_b64 s[36:37], s[30:31]
+; GFX9-O3-NEXT:    v_writelane_b32 v3, s31, 1
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v2, s8
 ; GFX9-O3-NEXT:    s_not_b64 exec, exec
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-O3-NEXT:    s_not_b64 exec, exec
 ; GFX9-O3-NEXT:    s_or_saveexec_b64 s[34:35], -1
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v2
-; GFX9-O3-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-O3-NEXT:    s_add_u32 s30, s30, strict_wwm_called@rel32@lo+4
-; GFX9-O3-NEXT:    s_addc_u32 s31, s31, strict_wwm_called@rel32@hi+12
-; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-O3-NEXT:    s_getpc_b64 s[36:37]
+; GFX9-O3-NEXT:    s_add_u32 s36, s36, strict_wwm_called@rel32@lo+4
+; GFX9-O3-NEXT:    s_addc_u32 s37, s37, strict_wwm_called@rel32@hi+12
+; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[36:37]
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v0
 ; GFX9-O3-NEXT:    v_add_u32_e32 v1, v1, v2
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v1
 ; GFX9-O3-NEXT:    buffer_store_dword v0, off, s[4:7], 0 offset:4
+; GFX9-O3-NEXT:    v_readlane_b32 s31, v3, 1
+; GFX9-O3-NEXT:    v_readlane_b32 s30, v3, 0
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0xfc00
-; GFX9-O3-NEXT:    s_mov_b32 s33, s38
-; GFX9-O3-NEXT:    s_or_saveexec_b64 s[30:31], -1
+; GFX9-O3-NEXT:    v_readlane_b32 s33, v3, 2
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    buffer_load_dword v1, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
-; GFX9-O3-NEXT:    s_mov_b64 exec, s[30:31]
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O3-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-O3-NEXT:    s_setpc_b64 s[30:31]
   %tmp107 = tail call i32 @llvm.amdgcn.set.inactive.i32(i32 %arg, i32 0)
   %tmp134 = call amdgpu_gfx i32 @strict_wwm_called(i32 %tmp107)
   %tmp136 = add i32 %tmp134, %tmp107
@@ -530,7 +531,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O0-NEXT:    s_add_i32 s32, s32, 0xc00
 ; GFX9-O0-NEXT:    v_writelane_b32 v10, s30, 0
 ; GFX9-O0-NEXT:    v_writelane_b32 v10, s31, 1
-; GFX9-O0-NEXT:    s_mov_b32 s30, s8
+; GFX9-O0-NEXT:    s_mov_b32 s34, s8
 ; GFX9-O0-NEXT:    s_mov_b32 s36, s4
 ; GFX9-O0-NEXT:    ; kill: def $sgpr36 killed $sgpr36 def $sgpr36_sgpr37_sgpr38_sgpr39
 ; GFX9-O0-NEXT:    s_mov_b32 s37, s5
@@ -540,27 +541,27 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O0-NEXT:    v_writelane_b32 v10, s37, 3
 ; GFX9-O0-NEXT:    v_writelane_b32 v10, s38, 4
 ; GFX9-O0-NEXT:    v_writelane_b32 v10, s39, 5
-; GFX9-O0-NEXT:    ; kill: def $sgpr30 killed $sgpr30 def $sgpr30_sgpr31
-; GFX9-O0-NEXT:    s_mov_b32 s31, s9
-; GFX9-O0-NEXT:    ; kill: def $sgpr34_sgpr35 killed $sgpr30_sgpr31
-; GFX9-O0-NEXT:    s_mov_b64 s[34:35], 0
-; GFX9-O0-NEXT:    v_mov_b32_e32 v8, s30
-; GFX9-O0-NEXT:    v_mov_b32_e32 v9, s31
-; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    ; kill: def $sgpr34 killed $sgpr34 def $sgpr34_sgpr35
+; GFX9-O0-NEXT:    s_mov_b32 s35, s9
+; GFX9-O0-NEXT:    ; kill: def $sgpr40_sgpr41 killed $sgpr34_sgpr35
+; GFX9-O0-NEXT:    s_mov_b64 s[36:37], 0
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v8, s34
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v9, s35
 ; GFX9-O0-NEXT:    s_not_b64 exec, exec
-; GFX9-O0-NEXT:    s_or_saveexec_b64 s[30:31], -1
-; GFX9-O0-NEXT:    v_writelane_b32 v10, s30, 6
-; GFX9-O0-NEXT:    v_writelane_b32 v10, s31, 7
+; GFX9-O0-NEXT:    v_mov_b32_e32 v8, s36
+; GFX9-O0-NEXT:    v_mov_b32_e32 v9, s37
+; GFX9-O0-NEXT:    s_not_b64 exec, exec
+; GFX9-O0-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-O0-NEXT:    v_writelane_b32 v10, s34, 6
+; GFX9-O0-NEXT:    v_writelane_b32 v10, s35, 7
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v8
-; GFX9-O0-NEXT:    s_mov_b32 s30, 32
-; GFX9-O0-NEXT:    ; implicit-def: $sgpr34_sgpr35
-; GFX9-O0-NEXT:    v_lshrrev_b64 v[3:4], s30, v[8:9]
-; GFX9-O0-NEXT:    s_getpc_b64 s[30:31]
-; GFX9-O0-NEXT:    s_add_u32 s30, s30, strict_wwm_called_i64@gotpcrel32@lo+4
-; GFX9-O0-NEXT:    s_addc_u32 s31, s31, strict_wwm_called_i64@gotpcrel32@hi+12
-; GFX9-O0-NEXT:    s_load_dwordx2 s[30:31], s[30:31], 0x0
+; GFX9-O0-NEXT:    s_mov_b32 s34, 32
+; GFX9-O0-NEXT:    ; implicit-def: $sgpr36_sgpr37
+; GFX9-O0-NEXT:    v_lshrrev_b64 v[3:4], s34, v[8:9]
+; GFX9-O0-NEXT:    s_getpc_b64 s[34:35]
+; GFX9-O0-NEXT:    s_add_u32 s34, s34, strict_wwm_called_i64@gotpcrel32@lo+4
+; GFX9-O0-NEXT:    s_addc_u32 s35, s35, strict_wwm_called_i64@gotpcrel32@hi+12
+; GFX9-O0-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
 ; GFX9-O0-NEXT:    s_mov_b64 s[38:39], s[2:3]
 ; GFX9-O0-NEXT:    s_mov_b64 s[36:37], s[0:1]
 ; GFX9-O0-NEXT:    s_mov_b64 s[0:1], s[36:37]
@@ -568,15 +569,13 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v0, v2
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
 ; GFX9-O0-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[30:31]
+; GFX9-O0-NEXT:    s_swappc_b64 s[30:31], s[34:35]
 ; GFX9-O0-NEXT:    v_readlane_b32 s34, v10, 6
 ; GFX9-O0-NEXT:    v_readlane_b32 s35, v10, 7
 ; GFX9-O0-NEXT:    v_readlane_b32 s36, v10, 2
 ; GFX9-O0-NEXT:    v_readlane_b32 s37, v10, 3
 ; GFX9-O0-NEXT:    v_readlane_b32 s38, v10, 4
 ; GFX9-O0-NEXT:    v_readlane_b32 s39, v10, 5
-; GFX9-O0-NEXT:    v_readlane_b32 s30, v10, 0
-; GFX9-O0-NEXT:    v_readlane_b32 s31, v10, 1
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v2, v0
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v3, v1
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v4, v8
@@ -588,6 +587,8 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O0-NEXT:    v_mov_b32_e32 v1, v3
 ; GFX9-O0-NEXT:    s_mov_b32 s34, 0
 ; GFX9-O0-NEXT:    buffer_store_dwordx2 v[0:1], off, s[36:39], s34 offset:4
+; GFX9-O0-NEXT:    v_readlane_b32 s31, v10, 1
+; GFX9-O0-NEXT:    v_readlane_b32 s30, v10, 0
 ; GFX9-O0-NEXT:    s_add_i32 s32, s32, 0xfffff400
 ; GFX9-O0-NEXT:    v_readlane_b32 s33, v10, 8
 ; GFX9-O0-NEXT:    s_or_saveexec_b64 s[34:35], -1
@@ -619,6 +620,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O3:       ; %bb.0:
 ; GFX9-O3-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-O3-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-O3-NEXT:    buffer_store_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    buffer_store_dword v6, off, s[0:3], s32 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-O3-NEXT:    buffer_store_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -628,16 +630,17 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
 ; GFX9-O3-NEXT:    buffer_store_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
 ; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
-; GFX9-O3-NEXT:    s_mov_b32 s40, s33
+; GFX9-O3-NEXT:    v_writelane_b32 v8, s33, 2
+; GFX9-O3-NEXT:    v_writelane_b32 v8, s30, 0
 ; GFX9-O3-NEXT:    s_mov_b32 s33, s32
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0x800
-; GFX9-O3-NEXT:    s_mov_b64 s[36:37], s[30:31]
-; GFX9-O3-NEXT:    s_or_saveexec_b64 s[30:31], -1
-; GFX9-O3-NEXT:    s_getpc_b64 s[34:35]
-; GFX9-O3-NEXT:    s_add_u32 s34, s34, strict_wwm_called_i64@gotpcrel32@lo+4
-; GFX9-O3-NEXT:    s_addc_u32 s35, s35, strict_wwm_called_i64@gotpcrel32@hi+12
-; GFX9-O3-NEXT:    s_load_dwordx2 s[34:35], s[34:35], 0x0
-; GFX9-O3-NEXT:    s_mov_b64 exec, s[30:31]
+; GFX9-O3-NEXT:    v_writelane_b32 v8, s31, 1
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-O3-NEXT:    s_getpc_b64 s[36:37]
+; GFX9-O3-NEXT:    s_add_u32 s36, s36, strict_wwm_called_i64@gotpcrel32@lo+4
+; GFX9-O3-NEXT:    s_addc_u32 s37, s37, strict_wwm_called_i64@gotpcrel32@hi+12
+; GFX9-O3-NEXT:    s_load_dwordx2 s[36:37], s[36:37], 0x0
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v6, s8
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v7, s9
 ; GFX9-O3-NEXT:    s_not_b64 exec, exec
@@ -648,7 +651,7 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v6
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v7
 ; GFX9-O3-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[34:35]
+; GFX9-O3-NEXT:    s_swappc_b64 s[30:31], s[36:37]
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v2, v0
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v3, v1
 ; GFX9-O3-NEXT:    v_add_co_u32_e32 v2, vcc, v2, v6
@@ -657,9 +660,13 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v0, v2
 ; GFX9-O3-NEXT:    v_mov_b32_e32 v1, v3
 ; GFX9-O3-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0 offset:4
+; GFX9-O3-NEXT:    v_readlane_b32 s31, v8, 1
+; GFX9-O3-NEXT:    v_readlane_b32 s30, v8, 0
 ; GFX9-O3-NEXT:    s_addk_i32 s32, 0xf800
-; GFX9-O3-NEXT:    s_mov_b32 s33, s40
-; GFX9-O3-NEXT:    s_or_saveexec_b64 s[30:31], -1
+; GFX9-O3-NEXT:    v_readlane_b32 s33, v8, 2
+; GFX9-O3-NEXT:    s_or_saveexec_b64 s[34:35], -1
+; GFX9-O3-NEXT:    buffer_load_dword v8, off, s[0:3], s32 offset:24 ; 4-byte Folded Reload
+; GFX9-O3-NEXT:    s_nop 0
 ; GFX9-O3-NEXT:    buffer_load_dword v6, off, s[0:3], s32 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    s_nop 0
 ; GFX9-O3-NEXT:    buffer_load_dword v7, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
@@ -670,9 +677,9 @@ define amdgpu_gfx void @strict_wwm_call_i64(<4 x i32> inreg %tmp14, i64 inreg %a
 ; GFX9-O3-NEXT:    buffer_load_dword v2, off, s[0:3], s32 offset:16 ; 4-byte Folded Reload
 ; GFX9-O3-NEXT:    s_nop 0
 ; GFX9-O3-NEXT:    buffer_load_dword v3, off, s[0:3], s32 offset:20 ; 4-byte Folded Reload
-; GFX9-O3-NEXT:    s_mov_b64 exec, s[30:31]
+; GFX9-O3-NEXT:    s_mov_b64 exec, s[34:35]
 ; GFX9-O3-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-O3-NEXT:    s_setpc_b64 s[36:37]
+; GFX9-O3-NEXT:    s_setpc_b64 s[30:31]
   %tmp107 = tail call i64 @llvm.amdgcn.set.inactive.i64(i64 %arg, i64 0)
   %tmp134 = call amdgpu_gfx i64 @strict_wwm_called_i64(i64 %tmp107)
   %tmp136 = add i64 %tmp134, %tmp107
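
The pattern in the hunks above repeats across the generated checks: the return
address is no longer parked in a scratch pair such as s[36:37] for the length
of the function body, but is saved and restored through writelane/readlane
like any other callee-saved register, and the function now ends in
s_setpc_b64 s[30:31]. What makes this possible is the late expansion of the
operand-less SI_RETURN pseudo. Below is a minimal sketch of such an
expandPostRAPseudo() case, assuming the upstream opcode and register names
(SI_RETURN, S_SETPC_B64_return, SGPR30_SGPR31); it is illustrative rather than
a verbatim excerpt of this commit, and it only builds inside an LLVM source
tree.

    // Sketch only: expand the operand-less SI_RETURN pseudo into the real
    // indirect-branch return after frame lowering has already restored the
    // callee-saved return address in s[30:31].
    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"

    using namespace llvm;

    bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
      MachineBasicBlock &MBB = *MI.getParent();
      switch (MI.getOpcode()) {
      case AMDGPU::SI_RETURN: {
        // Build the concrete return. The return-address pair is defined at
        // this point because the CSR restore sequence was emitted earlier,
        // during frame lowering.
        MachineInstrBuilder MIB =
            BuildMI(MBB, MI, MI.getDebugLoc(),
                    get(AMDGPU::S_SETPC_B64_return))
                .addReg(AMDGPU::SGPR30_SGPR31);
        // Keep the implicit uses (e.g. the $vgpr0/$vgpr1 return values) that
        // the pseudo carried, then drop the pseudo itself.
        MIB.copyImplicitOps(MI);
        MI.eraseFromParent();
        return true;
      }
      default:
        return false;
      }
    }

In the MIR tests that follow, this same expansion is why check lines of the
form `S_SETPC_B64_return killed renamable $sgpr30_sgpr31` collapse to a bare
`SI_RETURN` with no register operand.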

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir b/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
index a9fbe15b244e9..0eaf663450425 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/machine-metadata.mir
@@ -77,10 +77,9 @@ machineMetadataNodes:
   - '!7 = distinct !{!7, !"MemcpyLoweringDomain"}'
 body:             |
   bb.0 (%ir-block.0):
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
 
     ; CHECK-LABEL: name: test_memcpy
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
     ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
@@ -96,11 +95,8 @@ body:             |
     ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
     ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
     ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 killed [[COPY8]], killed [[COPY9]], 0, implicit $exec
-    ; CHECK: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
     ; CHECK: $vgpr0 = COPY [[V_ADD_U32_e64_]]
-    ; CHECK: [[COPY11:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY10]]
-    ; CHECK: S_SETPC_B64_return [[COPY11]], implicit $vgpr0
-    %4:sreg_64 = COPY $sgpr30_sgpr31
+    ; CHECK: SI_RETURN implicit $vgpr0
     %3:vgpr_32 = COPY $vgpr3
     %2:vgpr_32 = COPY $vgpr2
     %1:vgpr_32 = COPY $vgpr1
@@ -116,10 +112,8 @@ body:             |
     %13:vgpr_32 = COPY %11.sub0
     %14:vgpr_32 = COPY %11.sub1
     %15:vgpr_32 = V_ADD_U32_e64 killed %13, killed %14, 0, implicit $exec
-    %5:ccr_sgpr_64 = COPY %4
     $vgpr0 = COPY %15
-    %16:ccr_sgpr_64 = COPY %5
-    S_SETPC_B64_return %16, implicit $vgpr0
+    SI_RETURN implicit $vgpr0
 
 ...
 ---
@@ -134,10 +128,9 @@ machineMetadataNodes:
   - '!10 = !{!1, !9}'
 body:             |
   bb.0 (%ir-block.0):
-    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3
 
     ; CHECK-LABEL: name: test_memcpy_inline
-    ; CHECK: [[COPY:%[0-9]+]]:sreg_64 = COPY $sgpr30_sgpr31
     ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
     ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
     ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
@@ -153,11 +146,8 @@ body:             |
     ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
     ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
     ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 killed [[COPY8]], killed [[COPY9]], 0, implicit $exec
-    ; CHECK: [[COPY10:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY]]
     ; CHECK: $vgpr0 = COPY [[V_ADD_U32_e64_]]
-    ; CHECK: [[COPY11:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY10]]
-    ; CHECK: S_SETPC_B64_return [[COPY11]], implicit $vgpr0
-    %4:sreg_64 = COPY $sgpr30_sgpr31
+    ; CHECK: SI_RETURN implicit $vgpr0
     %3:vgpr_32 = COPY $vgpr3
     %2:vgpr_32 = COPY $vgpr2
     %1:vgpr_32 = COPY $vgpr1
@@ -173,9 +163,7 @@ body:             |
     %13:vgpr_32 = COPY %11.sub0
     %14:vgpr_32 = COPY %11.sub1
     %15:vgpr_32 = V_ADD_U32_e64 killed %13, killed %14, 0, implicit $exec
-    %5:ccr_sgpr_64 = COPY %4
     $vgpr0 = COPY %15
-    %16:ccr_sgpr_64 = COPY %5
-    S_SETPC_B64_return %16, implicit $vgpr0
+    SI_RETURN implicit $vgpr0
 
 ...

diff  --git a/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir b/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
index 7b2ef70da2d66..6e9f4dacc56c0 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/stack-id-assert.mir
@@ -17,11 +17,9 @@
 
 ---
 name:            foo
-liveins:
-  - { reg: '$sgpr30_sgpr31', virtual-reg: '' }
 body:             |
   bb.0:
-    S_SETPC_B64_return killed renamable $sgpr30_sgpr31
+    SI_RETURN
 
 ...
 ---
@@ -31,7 +29,6 @@ liveins:
   - { reg: '$vgpr0', virtual-reg: '' }
   - { reg: '$vgpr1', virtual-reg: '' }
   - { reg: '$vgpr2', virtual-reg: '' }
-  - { reg: '$sgpr30_sgpr31', virtual-reg: '' }
 stack:
   - { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4,
       stack-id: sgpr-spill, callee-saved-register: '', callee-saved-restored: true,
@@ -41,7 +38,7 @@ machineFunctionInfo:
   stackPtrOffsetReg: '$sgpr32'
 body:             |
   bb.0:
-    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $vgpr2
 
     renamable $vgpr41 = COPY $vgpr2, implicit $exec
     renamable $vgpr40 = COPY $vgpr1, implicit $exec
@@ -52,12 +49,10 @@ body:             |
     ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
     renamable $sgpr4_sgpr5 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-gotprel32-lo) @foo + 4, target-flags(amdgpu-gotprel32-hi) @foo + 12, implicit-def dead $scc
     renamable $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed renamable $sgpr4_sgpr5, 0, 0
-    SI_SPILL_S64_SAVE killed renamable $sgpr30_sgpr31, %stack.0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr32
     dead $sgpr30_sgpr31 = SI_CALL killed renamable $sgpr4_sgpr5, @foo, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3
-    renamable $sgpr30_sgpr31 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr32
     ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
 
-    S_SETPC_B64_return killed renamable $sgpr30_sgpr31
+    SI_RETURN
 
 ...
 

diff  --git a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected
index 6d78d97884fb5..0cc5ede932a9a 100644
--- a/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected
+++ b/llvm/test/tools/UpdateTestChecks/update_llc_test_checks/Inputs/amdgpu_isel.ll.expected
@@ -3,14 +3,12 @@
 
 define i64 @i64_test(i64 %i) nounwind readnone {
 ; CHECK-LABEL: i64_test:
-; CHECK:       SelectionDAG has 13 nodes:
+; CHECK:       SelectionDAG has 9 nodes:
 ; CHECK-NEXT:    t0: ch = EntryToken
-; CHECK-NEXT:    t11: i64,ch = CopyFromReg t0, Register:i64 %2
-; CHECK-NEXT:    t13: ch,glue = CopyToReg t0, Register:i64 %3, t11
-; CHECK-NEXT:    t15: ch,glue = CopyToReg t13, Register:i32 $vgpr0, IMPLICIT_DEF:i32, t13:1
-; CHECK-NEXT:    t21: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
-; CHECK-NEXT:    t17: ch,glue = CopyToReg t15, Register:i32 $vgpr1, t21, t15:1
-; CHECK-NEXT:    t18: ch = S_SETPC_B64_return Register:i64 %3, Register:i32 $vgpr0, Register:i32 $vgpr1, t17, t17:1
+; CHECK-NEXT:    t11: ch,glue = CopyToReg t0, Register:i32 $vgpr0, IMPLICIT_DEF:i32
+; CHECK-NEXT:    t17: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
+; CHECK-NEXT:    t13: ch,glue = CopyToReg t11, Register:i32 $vgpr1, t17, t11:1
+; CHECK-NEXT:    t14: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t13, t13:1
 ; CHECK-EMPTY:
   %loc = alloca i64
   %j = load i64, i64 * %loc
@@ -20,14 +18,12 @@ define i64 @i64_test(i64 %i) nounwind readnone {
 
 define i64 @i32_test(i32 %i) nounwind readnone {
 ; CHECK-LABEL: i32_test:
-; CHECK:       SelectionDAG has 12 nodes:
-; CHECK-NEXT:    t0: ch = EntryToken
+; CHECK:       SelectionDAG has 8 nodes:
 ; CHECK-NEXT:    t5: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
-; CHECK-NEXT:    t7: i64,ch = CopyFromReg t0, Register:i64 %1
-; CHECK-NEXT:    t9: ch,glue = CopyToReg t0, Register:i64 %2, t7
-; CHECK-NEXT:    t11: ch,glue = CopyToReg t9, Register:i32 $vgpr0, t5, t9:1
-; CHECK-NEXT:    t13: ch,glue = CopyToReg t11, Register:i32 $vgpr1, t5, t11:1
-; CHECK-NEXT:    t14: ch = S_SETPC_B64_return Register:i64 %2, Register:i32 $vgpr0, Register:i32 $vgpr1, t13, t13:1
+; CHECK-NEXT:    t0: ch = EntryToken
+; CHECK-NEXT:    t7: ch,glue = CopyToReg t0, Register:i32 $vgpr0, t5
+; CHECK-NEXT:    t9: ch,glue = CopyToReg t7, Register:i32 $vgpr1, t5, t7:1
+; CHECK-NEXT:    t10: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t9, t9:1
 ; CHECK-EMPTY:
   %loc = alloca i32
   %j = load i32, i32 * %loc
@@ -38,14 +34,12 @@ define i64 @i32_test(i32 %i) nounwind readnone {
 
 define i64 @i16_test(i16 %i) nounwind readnone {
 ; CHECK-LABEL: i16_test:
-; CHECK:       SelectionDAG has 12 nodes:
-; CHECK-NEXT:    t0: ch = EntryToken
+; CHECK:       SelectionDAG has 8 nodes:
 ; CHECK-NEXT:    t5: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
-; CHECK-NEXT:    t7: i64,ch = CopyFromReg t0, Register:i64 %1
-; CHECK-NEXT:    t9: ch,glue = CopyToReg t0, Register:i64 %2, t7
-; CHECK-NEXT:    t11: ch,glue = CopyToReg t9, Register:i32 $vgpr0, t5, t9:1
-; CHECK-NEXT:    t13: ch,glue = CopyToReg t11, Register:i32 $vgpr1, t5, t11:1
-; CHECK-NEXT:    t14: ch = S_SETPC_B64_return Register:i64 %2, Register:i32 $vgpr0, Register:i32 $vgpr1, t13, t13:1
+; CHECK-NEXT:    t0: ch = EntryToken
+; CHECK-NEXT:    t7: ch,glue = CopyToReg t0, Register:i32 $vgpr0, t5
+; CHECK-NEXT:    t9: ch,glue = CopyToReg t7, Register:i32 $vgpr1, t5, t7:1
+; CHECK-NEXT:    t10: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t9, t9:1
 ; CHECK-EMPTY:
   %loc = alloca i16
   %j = load i16, i16 * %loc
@@ -56,14 +50,12 @@ define i64 @i16_test(i16 %i) nounwind readnone {
 
 define i64 @i8_test(i8 %i) nounwind readnone {
 ; CHECK-LABEL: i8_test:
-; CHECK:       SelectionDAG has 12 nodes:
-; CHECK-NEXT:    t0: ch = EntryToken
+; CHECK:       SelectionDAG has 8 nodes:
 ; CHECK-NEXT:    t5: i32 = V_MOV_B32_e32 TargetConstant:i32<0>
-; CHECK-NEXT:    t7: i64,ch = CopyFromReg t0, Register:i64 %1
-; CHECK-NEXT:    t9: ch,glue = CopyToReg t0, Register:i64 %2, t7
-; CHECK-NEXT:    t11: ch,glue = CopyToReg t9, Register:i32 $vgpr0, t5, t9:1
-; CHECK-NEXT:    t13: ch,glue = CopyToReg t11, Register:i32 $vgpr1, t5, t11:1
-; CHECK-NEXT:    t14: ch = S_SETPC_B64_return Register:i64 %2, Register:i32 $vgpr0, Register:i32 $vgpr1, t13, t13:1
+; CHECK-NEXT:    t0: ch = EntryToken
+; CHECK-NEXT:    t7: ch,glue = CopyToReg t0, Register:i32 $vgpr0, t5
+; CHECK-NEXT:    t9: ch,glue = CopyToReg t7, Register:i32 $vgpr1, t5, t7:1
+; CHECK-NEXT:    t10: ch = SI_RETURN Register:i32 $vgpr0, Register:i32 $vgpr1, t9, t9:1
 ; CHECK-EMPTY:
   %loc = alloca i8
   %j = load i8, i8 * %loc
