[llvm] 4067de5 - [AMDGPU] Select s_cselect

Piotr Sobczak via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 19 07:19:13 PDT 2020


Author: Piotr Sobczak
Date: 2020-06-19T16:17:46+02:00
New Revision: 4067de569f119a81419fbf2e79d5f3307dfdda5b

URL: https://github.com/llvm/llvm-project/commit/4067de569f119a81419fbf2e79d5f3307dfdda5b
DIFF: https://github.com/llvm/llvm-project/commit/4067de569f119a81419fbf2e79d5f3307dfdda5b.diff

LOG: [AMDGPU] Select s_cselect

Summary:
Add patterns to select s_cselect during instruction selection.

Handle more cases of implicit SCC accesses in si-fix-sgpr-copies
to allow the new patterns to work.
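
For illustration, a uniform select such as the one in the updated
32-bit-local-address-space.ll test (see the check-line changes further
below) now stays on the scalar unit instead of going through a VALU
compare plus v_cndmask. The snippet is a schematic sketch of that test
update, not exact compiler output; operands are omitted:

    ; %cmp = icmp ne i32 addrspace(3)* %lds, null
    ; %x   = select i1 %cmp, i32 123, i32 456
    ;
    ; Previously selected as:      Now selected as:
    ;   v_cmp_ne_u32  ...            s_cmp_lg_u32  ...
    ;   v_cndmask_b32 ...            s_cselect_b32 ...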

Subscribers: arsenm, kzhuravl, jvesely, wdng, nhaehnle, yaxunl, dstuttard, tpr, t-tye, hiraditya, asbirlea, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D81925

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.h
    llvm/lib/Target/AMDGPU/SOPInstructions.td
    llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
    llvm/test/CodeGen/AMDGPU/addrspacecast.ll
    llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
    llvm/test/CodeGen/AMDGPU/ctlz.ll
    llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
    llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll
    llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
    llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
    llvm/test/CodeGen/AMDGPU/fceil64.ll
    llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
    llvm/test/CodeGen/AMDGPU/fshl.ll
    llvm/test/CodeGen/AMDGPU/fshr.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
    llvm/test/CodeGen/AMDGPU/mad_uint24.ll
    llvm/test/CodeGen/AMDGPU/sad.ll
    llvm/test/CodeGen/AMDGPU/sdiv.ll
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/select-i1.ll
    llvm/test/CodeGen/AMDGPU/select-opt.ll
    llvm/test/CodeGen/AMDGPU/select-vectors.ll
    llvm/test/CodeGen/AMDGPU/select64.ll
    llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/trunc.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll
    llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
    llvm/test/CodeGen/AMDGPU/vselect.ll
    llvm/test/CodeGen/AMDGPU/wave32.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 07dc55b59f6e..299e359a8e76 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -637,6 +637,13 @@ void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   }
 
   if (RC == &AMDGPU::SReg_64RegClass) {
+    if (SrcReg == AMDGPU::SCC) {
+      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
+          .addImm(1)
+          .addImm(0);
+      return;
+    }
+
     if (DestReg == AMDGPU::VCC) {
       if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
@@ -663,10 +670,18 @@ void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   }
 
   if (DestReg == AMDGPU::SCC) {
-    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
-    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
-      .addReg(SrcReg, getKillRegState(KillSrc))
-      .addImm(0);
+    unsigned Opcode;
+    if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
+      Opcode = AMDGPU::S_CMP_LG_U32;
+    } else {
+      assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
+      Opcode = AMDGPU::S_CMP_LG_U64;
+    }
+
+    BuildMI(MBB, MI, DL, get(Opcode))
+        .addReg(SrcReg, getKillRegState(KillSrc))
+        .addImm(0);
+
     return;
   }
 
@@ -5397,6 +5412,12 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
       Inst.eraseFromParent();
     }
       continue;
+
+    case AMDGPU::S_CSELECT_B32:
+    case AMDGPU::S_CSELECT_B64:
+      lowerSelect(Worklist, Inst, MDT);
+      Inst.eraseFromParent();
+      continue;
     }
 
     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
@@ -5537,6 +5558,78 @@ bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
   return false;
 }
 
+void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
+                              MachineDominatorTree *MDT) const {
+
+  MachineBasicBlock &MBB = *Inst.getParent();
+  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+  MachineBasicBlock::iterator MII = Inst;
+  DebugLoc DL = Inst.getDebugLoc();
+
+  MachineOperand &Dest = Inst.getOperand(0);
+  MachineOperand &Src0 = Inst.getOperand(1);
+  MachineOperand &Src1 = Inst.getOperand(2);
+  MachineOperand &Cond = Inst.getOperand(3);
+
+  Register SCCSource = Cond.getReg();
+  // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead.
+  if (!Cond.isUndef()) {
+    for (MachineInstr &CandI :
+         make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)),
+                    Inst.getParent()->rend())) {
+      if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) !=
+          -1) {
+        if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
+          SCCSource = CandI.getOperand(1).getReg();
+        }
+        break;
+      }
+    }
+  }
+
+  // If this is a trivial select where the condition is effectively not SCC
+  // (SCCSource is a source of copy to SCC), then the select is semantically
+  // equivalent to copying SCCSource. Hence, there is no need to create
+  // V_CNDMASK, we can just use that and bail out.
+  if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) &&
+      Src1.isImm() && (Src1.getImm() == 0)) {
+    MRI.replaceRegWith(Dest.getReg(), SCCSource);
+    return;
+  }
+
+  const TargetRegisterClass *TC = ST.getWavefrontSize() == 64
+                                      ? &AMDGPU::SReg_64_XEXECRegClass
+                                      : &AMDGPU::SReg_32_XM0_XEXECRegClass;
+  Register CopySCC = MRI.createVirtualRegister(TC);
+
+  if (SCCSource == AMDGPU::SCC) {
+    // Insert a trivial select instead of creating a copy, because a copy from
+    // SCC would semantically mean just copying a single bit, but we may need
+    // the result to be a vector condition mask that needs preserving.
+    unsigned Opcode = (ST.getWavefrontSize() == 64) ? AMDGPU::S_CSELECT_B64
+                                                    : AMDGPU::S_CSELECT_B32;
+    auto NewSelect =
+        BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0);
+    NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
+  } else {
+    BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource);
+  }
+
+  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
+
+  auto UpdatedInst =
+      BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg)
+          .addImm(0)
+          .add(Src1) // False
+          .addImm(0)
+          .add(Src0) // True
+          .addReg(CopySCC);
+
+  MRI.replaceRegWith(Dest.getReg(), ResultReg);
+  legalizeOperands(*UpdatedInst, MDT);
+  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
+}
+
 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
                                  MachineInstr &Inst) const {
   MachineBasicBlock &MBB = *Inst.getParent();
@@ -6118,6 +6211,8 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                                MachineInstr &SCCDefInst,
                                                SetVectorType &Worklist) const {
+  bool SCCUsedImplicitly = false;
+
   // Ensure that def inst defines SCC, which is still live.
   assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
          !Op.isDead() && Op.getParent() == &SCCDefInst);
@@ -6132,19 +6227,32 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
       if (MI.isCopy()) {
         MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
         unsigned DestReg = MI.getOperand(0).getReg();
-        SmallVector<MachineInstr *, 4> Users;
+
         for (auto &User : MRI.use_nodbg_instructions(DestReg)) {
           if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) ||
               (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) {
-            Users.push_back(&User);
+            User.getOperand(4).setReg(RI.getVCC());
             Worklist.insert(&User);
+          } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) {
+            User.getOperand(5).setReg(RI.getVCC());
+            // No need to add to Worklist.
           }
         }
-        for (auto &U : Users)
-          U->getOperand(4).setReg(RI.getVCC());
         CopyToDelete.push_back(&MI);
-      } else
+      } else {
+        if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 ||
+            MI.getOpcode() == AMDGPU::S_CSELECT_B64) {
+          // This is an implicit use of SCC and it is really expected by
+          // the SCC users to handle.
+          // We cannot preserve the edge to the user so add the explicit
+          // copy: SCC = COPY VCC.
+          // The copy will be cleaned up during the processing of the user
+          // in lowerSelect.
+          SCCUsedImplicitly = true;
+        }
+
         Worklist.insert(&MI);
+      }
     }
     // Exit if we find another SCC def.
     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
@@ -6152,6 +6260,12 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
   }
   for (auto &Copy : CopyToDelete)
     Copy->eraseFromParent();
+
+  if (SCCUsedImplicitly) {
+    BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()),
+            SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC)
+        .addReg(RI.getVCC());
+  }
 }
 
 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(

diff  --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 0f0e8420f9cf..53e2ffba0f65 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -84,6 +84,9 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
   bool moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
                         MachineDominatorTree *MDT = nullptr) const;
 
+  void lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
+                   MachineDominatorTree *MDT = nullptr) const;
+
   void lowerScalarAbs(SetVectorType &Worklist,
                       MachineInstr &Inst) const;
 

diff  --git a/llvm/lib/Target/AMDGPU/SOPInstructions.td b/llvm/lib/Target/AMDGPU/SOPInstructions.td
index dd6363b1439f..0f0148e92955 100644
--- a/llvm/lib/Target/AMDGPU/SOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SOPInstructions.td
@@ -465,10 +465,21 @@ def S_MAX_U32 : SOP2_32 <"s_max_u32",
 } // End isCommutable = 1
 } // End Defs = [SCC]
 
+class SelectPat<SDPatternOperator select> : PatFrag <
+  (ops node:$src1, node:$src2),
+  (select SCC, $src1, $src2),
+  [{ return N->getOperand(0)->hasOneUse() && !N->isDivergent(); }]
+>;
+
+let Uses = [SCC], AddedComplexity = 20 in {
+  def S_CSELECT_B64 : SOP2_64 <"s_cselect_b64",
+    [(set i64:$sdst, (SelectPat<select> i64:$src0, i64:$src1))]
+  >;
+
+  def S_CSELECT_B32 : SOP2_32 <"s_cselect_b32",
+    [(set i32:$sdst, (SelectPat<select> i32:$src0, i32:$src1))]
+>;
 
-let Uses = [SCC] in {
-  def S_CSELECT_B32 : SOP2_32 <"s_cselect_b32">;
-  def S_CSELECT_B64 : SOP2_64 <"s_cselect_b64">;
 } // End Uses = [SCC]
 
 let Defs = [SCC] in {

diff  --git a/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll b/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
index 72e62f8dbbfd..eb6c98774ea6 100644
--- a/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
+++ b/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
@@ -57,9 +57,9 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}null_32bit_lds_ptr:
-; SI: v_cmp_ne_u32
+; SI: s_cmp_lg_u32
 ; SI-NOT: v_cmp_ne_u32
-; SI: v_cndmask_b32
+; SI: s_cselect_b32
 define amdgpu_kernel void @null_32bit_lds_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %lds) nounwind {
   %cmp = icmp ne i32 addrspace(3)* %lds, null
   %x = select i1 %cmp, i32 123, i32 456

diff  --git a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
index d16edbac75fe..472fe8e8627e 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -148,10 +148,12 @@ define amdgpu_kernel void @use_constant_to_global_addrspacecast(i32 addrspace(4)
 ; HSA: enable_sgpr_queue_ptr = 0
 
 ; HSA: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}
-; HSA-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
-; HSA-DAG: v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
-; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
 ; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
+; CI-DAG: v_cmp_ne_u64_e64 s{{\[}}[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]{{\]}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
+; CI-DAG: s_cmp_lg_u64 s{{\[}}[[CMP_LO]]:[[CMP_HI]]{{\]}}, 0
+; GFX9-DAG: s_cmp_lg_u64 s{{\[}}[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]{{\]}}, 0
+; HSA-DAG: s_cselect_b32 s[[PTR_LO]], s[[PTR_LO]], -1
+; HSA-DAG: v_mov_b32_e32 [[CASTPTR:v[0-9]+]], s[[PTR_LO]]
 ; HSA: ds_write_b32 [[CASTPTR]], v[[K]]
 define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32* %ptr) #0 {
   %ftos = addrspacecast i32* %ptr to i32 addrspace(3)*
@@ -165,10 +167,12 @@ define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32* %ptr) #0 {
 ; HSA: enable_sgpr_queue_ptr = 0
 
 ; HSA: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}
-; HSA-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
-; HSA-DAG: v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
-; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
 ; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
+; CI-DAG: v_cmp_ne_u64_e64 s{{\[}}[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]{{\]}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
+; CI-DAG: s_cmp_lg_u64 s{{\[}}[[CMP_LO]]:[[CMP_HI]]{{\]}}, 0
+; GFX9-DAG: s_cmp_lg_u64 s{{\[}}[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]{{\]}}, 0
+; HSA-DAG: s_cselect_b32 s[[PTR_LO]], s[[PTR_LO]], -1
+; HSA-DAG: v_mov_b32_e32 [[CASTPTR:v[0-9]+]], s[[PTR_LO]]
 ; HSA: buffer_store_dword v[[K]], [[CASTPTR]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
 define amdgpu_kernel void @use_flat_to_private_addrspacecast(i32* %ptr) #0 {
   %ftos = addrspacecast i32* %ptr to i32 addrspace(5)*

diff  --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index de7521c3d30d..491d27846fd2 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -492,15 +492,15 @@ define amdgpu_kernel void @sdiv_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
 ; GCN-NEXT:    s_xor_b32 s0, s0, s1
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s0
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
 ; GCN-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = sdiv i16 %x, %y
@@ -541,24 +541,24 @@ define amdgpu_kernel void @srem_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
 ; GCN-NEXT:    s_load_dword s4, s[0:1], 0xb
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s2, s4, 16
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s2
-; GCN-NEXT:    s_sext_i32_i16 s3, s4
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s3
-; GCN-NEXT:    s_xor_b32 s3, s3, s2
+; GCN-NEXT:    s_ashr_i32 s5, s4, 16
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s5
+; GCN-NEXT:    s_sext_i32_i16 s2, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s2
+; GCN-NEXT:    s_xor_b32 s2, s2, s5
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_ashr_i32 s3, s3, 30
-; GCN-NEXT:    s_or_b32 s3, s3, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    s_ashr_i32 s2, s2, 30
+; GCN-NEXT:    s_or_b32 s6, s2, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GCN-NEXT:    s_cselect_b32 s2, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s2
 ; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
 ; GCN-NEXT:    buffer_store_short v0, off, s[0:3], 0
@@ -703,15 +703,15 @@ define amdgpu_kernel void @sdiv_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
 ; GCN-NEXT:    s_xor_b32 s0, s0, s1
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s0
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
 ; GCN-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = sdiv i8 %x, %y
@@ -750,29 +750,29 @@ define amdgpu_kernel void @srem_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
 ; GCN-LABEL: srem_i8:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s0, s[0:1], 0xb
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_bfe_i32 s1, s0, 0x80008
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s1
-; GCN-NEXT:    s_sext_i32_i8 s3, s0
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s3
-; GCN-NEXT:    s_xor_b32 s1, s3, s1
+; GCN-NEXT:    s_bfe_i32 s0, s2, 0x80008
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
+; GCN-NEXT:    s_sext_i32_i8 s1, s2
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s1
+; GCN-NEXT:    s_xor_b32 s0, s1, s0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_ashr_i32 s1, s1, 30
-; GCN-NEXT:    s_or_b32 s1, s1, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_lshr_b32 s3, s2, 8
+; GCN-NEXT:    s_or_b32 s6, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    s_lshr_b32 s2, s0, 8
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s3
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
 ; GCN-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = srem i8 %x, %y
@@ -2434,71 +2434,71 @@ define amdgpu_kernel void @sdiv_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s9
 ; GCN-NEXT:    s_xor_b32 s8, s9, s8
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_ashr_i32 s2, s2, 16
 ; GCN-NEXT:    s_ashr_i32 s8, s8, 30
-; GCN-NEXT:    s_or_b32 s8, s8, 1
+; GCN-NEXT:    s_or_b32 s10, s8, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s2
-; GCN-NEXT:    v_mov_b32_e32 v3, s8
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s8, s10, 0
+; GCN-NEXT:    s_ashr_i32 s2, s2, 16
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s2
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 16
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v1
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v0
 ; GCN-NEXT:    s_xor_b32 s0, s0, s2
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mul_f32_e32 v3, v2, v3
+; GCN-NEXT:    s_sext_i32_i16 s2, s3
+; GCN-NEXT:    v_mul_f32_e32 v3, v1, v3
 ; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mad_f32 v2, -v3, v1, v2
-; GCN-NEXT:    v_mov_b32_e32 v4, s0
-; GCN-NEXT:    s_sext_i32_i16 s0, s3
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v1|
+; GCN-NEXT:    v_mad_f32 v1, -v3, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v3
-; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; GCN-NEXT:    s_sext_i32_i16 s2, s1
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v1, v3
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v2
-; GCN-NEXT:    s_xor_b32 s0, s2, s0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s8, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_or_b32 s0, s0, 1
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s2
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s0, s0, 0
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s0, v3
+; GCN-NEXT:    s_sext_i32_i16 s0, s1
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v0
+; GCN-NEXT:    s_xor_b32 s0, s0, s2
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
 ; GCN-NEXT:    s_or_b32 s0, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v4, v1, v4
 ; GCN-NEXT:    v_trunc_f32_e32 v4, v4
-; GCN-NEXT:    v_mad_f32 v1, -v4, v2, v1
-; GCN-NEXT:    v_mov_b32_e32 v5, s0
-; GCN-NEXT:    s_ashr_i32 s0, s3, 16
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v2|
+; GCN-NEXT:    v_mad_f32 v1, -v4, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s0, s0, 0
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v5, vcc
-; GCN-NEXT:    s_ashr_i32 s1, s1, 16
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
-; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s1
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v2
-; GCN-NEXT:    s_xor_b32 s0, s1, s0
+; GCN-NEXT:    s_ashr_i32 s2, s3, 16
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s2
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, s0, v4
+; GCN-NEXT:    s_ashr_i32 s0, s1, 16
+; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v0
+; GCN-NEXT:    s_xor_b32 s0, s0, s2
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v5, v4, v5
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
-; GCN-NEXT:    v_mad_f32 v4, -v5, v2, v4
+; GCN-NEXT:    v_mad_f32 v4, -v5, v0, v4
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v5, v5
-; GCN-NEXT:    v_mov_b32_e32 v6, s0
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v2|
-; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v6, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v4|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v5
 ; GCN-NEXT:    s_mov_b32 s0, 0xffff
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GCN-NEXT:    v_and_b32_e32 v1, s0, v1
-; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
-; GCN-NEXT:    v_and_b32_e32 v0, s0, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v2
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v0
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v3
+; GCN-NEXT:    v_and_b32_e32 v2, s0, v2
+; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = sdiv <4 x i16> %x, %y
@@ -2629,15 +2629,15 @@ define amdgpu_kernel void @srem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; GCN-NEXT:    s_xor_b32 s8, s9, s8
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_ashr_i32 s8, s8, 30
-; GCN-NEXT:    s_or_b32 s8, s8, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s8
+; GCN-NEXT:    s_or_b32 s10, s8, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s8, s10, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s8, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s2
 ; GCN-NEXT:    s_ashr_i32 s2, s2, 16
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s2
@@ -2647,15 +2647,15 @@ define amdgpu_kernel void @srem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v1
 ; GCN-NEXT:    s_xor_b32 s8, s0, s2
 ; GCN-NEXT:    s_ashr_i32 s8, s8, 30
-; GCN-NEXT:    s_or_b32 s8, s8, 1
+; GCN-NEXT:    s_or_b32 s10, s8, 1
 ; GCN-NEXT:    v_mul_f32_e32 v3, v2, v3
 ; GCN-NEXT:    v_trunc_f32_e32 v3, v3
 ; GCN-NEXT:    v_mad_f32 v2, -v3, v1, v2
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v3
-; GCN-NEXT:    v_mov_b32_e32 v4, s8
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v1|
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v2|, |v1|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s8, s10, 0
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, s8, v3
 ; GCN-NEXT:    v_mul_lo_u32 v1, v1, s2
 ; GCN-NEXT:    s_sext_i32_i16 s2, s3
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s2
@@ -2669,33 +2669,33 @@ define amdgpu_kernel void @srem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; GCN-NEXT:    v_mul_f32_e32 v4, v1, v4
 ; GCN-NEXT:    v_trunc_f32_e32 v4, v4
 ; GCN-NEXT:    v_mad_f32 v1, -v4, v2, v1
-; GCN-NEXT:    v_mov_b32_e32 v5, s0
-; GCN-NEXT:    s_ashr_i32 s0, s3, 16
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v2|
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v4, v4
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v2|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s0, s0, 0
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, s0, v4
+; GCN-NEXT:    s_ashr_i32 s0, s3, 16
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v5, vcc
-; GCN-NEXT:    s_ashr_i32 s2, s1, 16
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
-; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s2
+; GCN-NEXT:    s_ashr_i32 s8, s1, 16
+; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s8
+; GCN-NEXT:    s_xor_b32 s2, s8, s0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v2
+; GCN-NEXT:    s_ashr_i32 s2, s2, 30
 ; GCN-NEXT:    v_mul_lo_u32 v1, v1, s3
-; GCN-NEXT:    s_xor_b32 s3, s2, s0
-; GCN-NEXT:    s_ashr_i32 s3, s3, 30
+; GCN-NEXT:    s_or_b32 s9, s2, 1
 ; GCN-NEXT:    v_mul_f32_e32 v5, v4, v5
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
 ; GCN-NEXT:    v_mad_f32 v4, -v5, v2, v4
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v5, v5
-; GCN-NEXT:    s_or_b32 s3, s3, 1
-; GCN-NEXT:    v_mov_b32_e32 v6, s3
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v2|
-; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v6, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[2:3], |v4|, |v2|
+; GCN-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; GCN-NEXT:    s_cselect_b32 s2, s9, 0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s2, v5
 ; GCN-NEXT:    v_mul_lo_u32 v2, v2, s0
 ; GCN-NEXT:    s_mov_b32 s0, 0xffff
 ; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s1, v1
 ; GCN-NEXT:    v_and_b32_e32 v1, s0, v1
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s2, v2
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s8, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
 ; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
@@ -2849,15 +2849,15 @@ define amdgpu_kernel void @sdiv_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
 ; GCN-NEXT:    s_xor_b32 s0, s0, s1
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s0
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
 ; GCN-NEXT:    v_and_b32_e32 v0, 7, v0
 ; GCN-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
@@ -2897,29 +2897,29 @@ define amdgpu_kernel void @srem_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
 ; GCN-LABEL: srem_i3:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s0, s[0:1], 0xb
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_bfe_i32 s1, s0, 0x30008
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s1
-; GCN-NEXT:    s_bfe_i32 s3, s0, 0x30000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s3
-; GCN-NEXT:    s_xor_b32 s1, s3, s1
+; GCN-NEXT:    s_bfe_i32 s0, s2, 0x30008
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
+; GCN-NEXT:    s_bfe_i32 s1, s2, 0x30000
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s1
+; GCN-NEXT:    s_xor_b32 s0, s1, s0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_ashr_i32 s1, s1, 30
-; GCN-NEXT:    s_or_b32 s1, s1, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_lshr_b32 s3, s2, 8
+; GCN-NEXT:    s_or_b32 s6, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    s_lshr_b32 s2, s0, 8
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s3
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
 ; GCN-NEXT:    v_and_b32_e32 v0, 7, v0
 ; GCN-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
@@ -3270,54 +3270,54 @@ define amdgpu_kernel void @sdiv_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s8
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s9
 ; GCN-NEXT:    s_xor_b32 s8, s9, s8
-; GCN-NEXT:    s_ashr_i32 s0, s0, 16
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_ashr_i32 s8, s8, 30
-; GCN-NEXT:    s_or_b32 s8, s8, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-NEXT:    s_or_b32 s10, s8, 1
+; GCN-NEXT:    s_sext_i32_i16 s1, s1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s8, s10, 0
+; GCN-NEXT:    s_ashr_i32 s0, s0, 16
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
 ; GCN-NEXT:    s_ashr_i32 s2, s2, 16
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v1
 ; GCN-NEXT:    s_xor_b32 s0, s2, s0
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, s8, v2
+; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s2
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v0
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
 ; GCN-NEXT:    s_or_b32 s0, s0, 1
+; GCN-NEXT:    v_and_b32_e32 v1, 0xffff, v1
 ; GCN-NEXT:    v_mul_f32_e32 v3, v2, v3
 ; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mad_f32 v2, -v3, v1, v2
-; GCN-NEXT:    v_mov_b32_e32 v4, s0
-; GCN-NEXT:    s_sext_i32_i16 s0, s1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v1|
+; GCN-NEXT:    v_mad_f32 v2, -v3, v0, v2
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v3
-; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; GCN-NEXT:    s_sext_i32_i16 s1, s3
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s1
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v2
-; GCN-NEXT:    s_xor_b32 s0, s1, s0
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v2|, |v0|
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s1
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s0, s0, 0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s0, v3
+; GCN-NEXT:    s_sext_i32_i16 s0, s3
+; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v0
+; GCN-NEXT:    s_xor_b32 s0, s0, s1
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v4, v3, v4
 ; GCN-NEXT:    v_trunc_f32_e32 v4, v4
-; GCN-NEXT:    v_mad_f32 v3, -v4, v2, v3
+; GCN-NEXT:    v_mad_f32 v3, -v4, v0, v3
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; GCN-NEXT:    v_mov_b32_e32 v5, s0
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
-; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
-; GCN-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GCN-NEXT:    v_or_b32_e32 v0, v0, v1
-; GCN-NEXT:    buffer_store_short v2, off, s[4:7], 0 offset:4
-; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v3|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v4
+; GCN-NEXT:    v_or_b32_e32 v1, v1, v2
+; GCN-NEXT:    buffer_store_short v0, off, s[4:7], 0 offset:4
+; GCN-NEXT:    buffer_store_dword v1, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = sdiv <3 x i16> %x, %y
   store <3 x i16> %r, <3 x i16> addrspace(1)* %out
@@ -3422,20 +3422,19 @@ define amdgpu_kernel void @srem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; GCN-NEXT:    s_ashr_i32 s6, s6, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_or_b32 s6, s6, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s6
-; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s6, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s6, v2
 ; GCN-NEXT:    v_mov_b32_e32 v2, s0
 ; GCN-NEXT:    v_alignbit_b32 v2, s1, v2, 16
 ; GCN-NEXT:    v_bfe_i32 v3, v2, 0, 16
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v4, v3
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
 ; GCN-NEXT:    v_alignbit_b32 v1, s3, v1, 16
 ; GCN-NEXT:    v_bfe_i32 v5, v1, 0, 16
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v6, v5
@@ -3453,7 +3452,7 @@ define amdgpu_kernel void @srem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s0
 ; GCN-NEXT:    v_or_b32_e32 v3, 1, v3
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
 ; GCN-NEXT:    s_sext_i32_i16 s2, s3
 ; GCN-NEXT:    v_mul_lo_u32 v2, v3, v2
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s2
@@ -3465,12 +3464,13 @@ define amdgpu_kernel void @srem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
 ; GCN-NEXT:    v_mad_f32 v3, -v5, v4, v3
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v5, v5
-; GCN-NEXT:    v_mov_b32_e32 v6, s0
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v4|
-; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v3|, |v4|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s0, s0, 0
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s0, v5
 ; GCN-NEXT:    v_mul_lo_u32 v3, v3, s1
 ; GCN-NEXT:    v_sub_i32_e32 v1, vcc, v1, v2
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GCN-NEXT:    v_and_b32_e32 v0, 0xffff, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s3, v3
@@ -3844,51 +3844,51 @@ define amdgpu_kernel void @sdiv_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s1
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v2
 ; GCN-NEXT:    s_xor_b32 s1, s1, s3
-; GCN-NEXT:    s_bfe_i32 s0, s0, 0xf000f
 ; GCN-NEXT:    s_ashr_i32 s1, s1, 30
+; GCN-NEXT:    s_or_b32 s1, s1, 1
 ; GCN-NEXT:    v_mul_f32_e32 v4, v3, v4
 ; GCN-NEXT:    v_trunc_f32_e32 v4, v4
 ; GCN-NEXT:    v_mad_f32 v3, -v4, v2, v3
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v3|, |v2|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s1, s1, 0
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s0
-; GCN-NEXT:    s_or_b32 s1, s1, 1
-; GCN-NEXT:    v_mov_b32_e32 v5, s1
-; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
+; GCN-NEXT:    s_bfe_i32 s0, s0, 0xf000f
+; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s0
+; GCN-NEXT:    v_bfe_i32 v1, v1, 0, 15
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s1, v4
 ; GCN-NEXT:    s_bfe_i32 s1, s2, 0xf000f
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s1
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v3
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v2
 ; GCN-NEXT:    s_xor_b32 s0, s1, s0
-; GCN-NEXT:    v_bfe_i32 v1, v1, 0, 15
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v5, v4, v5
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
-; GCN-NEXT:    v_mad_f32 v4, -v5, v3, v4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v3|
+; GCN-NEXT:    v_mad_f32 v4, -v5, v2, v4
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v4|, |v2|
+; GCN-NEXT:    v_cvt_f32_i32_e32 v2, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v5, v5
-; GCN-NEXT:    v_cvt_f32_i32_e32 v4, v1
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mov_b32_e32 v6, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 15
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, s0, v5
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v5, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v6, v4
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v6, v2
 ; GCN-NEXT:    v_xor_b32_e32 v0, v0, v1
 ; GCN-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
 ; GCN-NEXT:    v_or_b32_e32 v0, 1, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, v5, v6
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
-; GCN-NEXT:    v_mad_f32 v5, -v1, v4, v5
+; GCN-NEXT:    v_mad_f32 v5, -v1, v2, v5
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, |v4|
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, |v2|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
 ; GCN-NEXT:    s_movk_i32 s0, 0x7fff
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_and_b32_e32 v3, s0, v3
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT:    v_and_b32_e32 v2, s0, v3
+; GCN-NEXT:    v_and_b32_e32 v3, s0, v4
 ; GCN-NEXT:    v_lshl_b64 v[0:1], v[0:1], 30
-; GCN-NEXT:    v_and_b32_e32 v2, s0, v2
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 15, v3
 ; GCN-NEXT:    v_or_b32_e32 v2, v2, v3
 ; GCN-NEXT:    v_or_b32_e32 v0, v2, v0
@@ -3993,52 +3993,52 @@ define amdgpu_kernel void @srem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
 ; GCN-NEXT:    v_alignbit_b32 v0, s3, v0, 30
 ; GCN-NEXT:    s_movk_i32 s3, 0x7fff
-; GCN-NEXT:    s_and_b32 s11, s0, s3
-; GCN-NEXT:    s_bfe_i32 s11, s11, 0xf0000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s11
-; GCN-NEXT:    s_and_b32 s9, s2, s3
-; GCN-NEXT:    s_bfe_i32 s9, s9, 0xf0000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s9
+; GCN-NEXT:    v_alignbit_b32 v1, s1, v1, 30
+; GCN-NEXT:    s_and_b32 s1, s0, s3
+; GCN-NEXT:    s_bfe_i32 s1, s1, 0xf0000
+; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s1
+; GCN-NEXT:    s_and_b32 s8, s2, s3
+; GCN-NEXT:    s_bfe_i32 s8, s8, 0xf0000
+; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s8
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v2
-; GCN-NEXT:    s_xor_b32 s9, s9, s11
-; GCN-NEXT:    s_ashr_i32 s9, s9, 30
-; GCN-NEXT:    s_or_b32 s9, s9, 1
+; GCN-NEXT:    s_xor_b32 s1, s8, s1
+; GCN-NEXT:    s_ashr_i32 s1, s1, 30
+; GCN-NEXT:    s_lshr_b32 s10, s2, 15
 ; GCN-NEXT:    v_mul_f32_e32 v4, v3, v4
 ; GCN-NEXT:    v_trunc_f32_e32 v4, v4
 ; GCN-NEXT:    v_mad_f32 v3, -v4, v2, v3
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; GCN-NEXT:    v_mov_b32_e32 v5, s9
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
-; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
-; GCN-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    s_bfe_u32 s12, s0, 0xf000f
-; GCN-NEXT:    v_alignbit_b32 v1, s1, v1, 30
+; GCN-NEXT:    s_bfe_u32 s11, s2, 0xf000f
+; GCN-NEXT:    s_lshr_b32 s12, s0, 15
+; GCN-NEXT:    s_bfe_u32 s13, s0, 0xf000f
+; GCN-NEXT:    s_or_b32 s1, s1, 1
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v3|, |v2|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s1, v4
 ; GCN-NEXT:    v_mul_lo_u32 v2, v2, s0
-; GCN-NEXT:    s_lshr_b32 s1, s0, 15
-; GCN-NEXT:    s_bfe_i32 s0, s12, 0xf0000
+; GCN-NEXT:    s_bfe_i32 s0, s13, 0xf0000
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s0
-; GCN-NEXT:    s_bfe_u32 s10, s2, 0xf000f
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s2, v2
-; GCN-NEXT:    s_lshr_b32 s8, s2, 15
-; GCN-NEXT:    s_bfe_i32 s2, s10, 0xf0000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s2
+; GCN-NEXT:    s_bfe_i32 s1, s11, 0xf0000
+; GCN-NEXT:    v_cvt_f32_i32_e32 v4, s1
+; GCN-NEXT:    s_xor_b32 s0, s1, s0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v3
-; GCN-NEXT:    s_xor_b32 s0, s2, s0
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s2, v2
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v5, v4, v5
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
 ; GCN-NEXT:    v_mad_f32 v4, -v5, v3, v4
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v5, v5
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v4|, |v3|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
 ; GCN-NEXT:    v_and_b32_e32 v1, s3, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v3|
-; GCN-NEXT:    v_mov_b32_e32 v6, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v6, vcc
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
 ; GCN-NEXT:    v_bfe_i32 v4, v1, 0, 15
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s0, v5
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v5, v4
 ; GCN-NEXT:    v_and_b32_e32 v0, s3, v0
 ; GCN-NEXT:    v_bfe_i32 v6, v0, 0, 15
@@ -4053,11 +4053,11 @@ define amdgpu_kernel void @srem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v6, v6
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v7|, |v5|
 ; GCN-NEXT:    v_cndmask_b32_e32 v4, 0, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v3, v3, s1
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
+; GCN-NEXT:    v_mul_lo_u32 v3, v3, s12
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
 ; GCN-NEXT:    v_mul_lo_u32 v1, v4, v1
 ; GCN-NEXT:    v_and_b32_e32 v2, s3, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s8, v3
+; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s10, v3
 ; GCN-NEXT:    v_and_b32_e32 v3, s3, v3
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_lshl_b64 v[0:1], v[0:1], 30
@@ -5330,121 +5330,121 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    s_movk_i32 s2, 0xfee0
 ; GCN-NEXT:    s_mov_b32 s3, 0x68958c89
-; GCN-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, s8
+; GCN-NEXT:    s_movk_i32 s8, 0x11f
 ; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
 ; GCN-NEXT:    v_mul_hi_u32 v3, v0, s3
 ; GCN-NEXT:    v_mul_lo_u32 v4, v1, s3
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, s3
+; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, v0, s3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v4, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v9, v1, v2
+; GCN-NEXT:    v_mul_lo_u32 v3, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v4, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v6, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, v1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    s_movk_i32 s4, 0x11e
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v3, v1, v3
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
-; GCN-NEXT:    s_mov_b32 s10, -1
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v4, v3, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
+; GCN-NEXT:    v_mul_lo_u32 v6, v1, v5
+; GCN-NEXT:    v_mul_hi_u32 v5, v1, v5
+; GCN-NEXT:    s_movk_i32 s9, 0x11e
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v0, s2
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, s3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, s2
+; GCN-NEXT:    v_mul_hi_u32 v7, v0, s3
 ; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v6, v2, s3
+; GCN-NEXT:    v_mul_lo_u32 v8, v2, s3
 ; GCN-NEXT:    s_mov_b32 s2, 0x976a7377
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, s3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v10, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v9, v0, v5
-; GCN-NEXT:    v_mul_hi_u32 v11, v2, v4
-; GCN-NEXT:    s_movk_i32 s3, 0x11f
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
-; GCN-NEXT:    v_addc_u32_e32 v9, vcc, v8, v10, vcc
-; GCN-NEXT:    v_mul_lo_u32 v10, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v5, v2, v5
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, v4
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v10
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v11, v7, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; GCN-NEXT:    v_mul_lo_u32 v7, v0, s3
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
+; GCN-NEXT:    v_mul_lo_u32 v8, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v10, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v9, v0, v7
+; GCN-NEXT:    v_mul_hi_u32 v11, v2, v5
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
+; GCN-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
+; GCN-NEXT:    v_mul_lo_u32 v10, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, v8, v10
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v9, v7, vcc
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v11, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v4, s[0:1]
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s6, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s6, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s6, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s7, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s7, v1
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s7, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s3
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v0, s8
 ; GCN-NEXT:    v_mul_hi_u32 v3, v0, s2
 ; GCN-NEXT:    v_mul_lo_u32 v4, v1, s2
-; GCN-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-NEXT:    v_mov_b32_e32 v5, s8
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_mul_lo_u32 v3, v0, s2
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s7, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s6, v3
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s2, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s4, v4
-; GCN-NEXT:    s_mov_b32 s2, 0x976a7376
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s2, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v6, s7
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v2
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s11, v2
+; GCN-NEXT:    v_sub_i32_e64 v3, s[0:1], s10, v3
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s2, v3
+; GCN-NEXT:    v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v4
+; GCN-NEXT:    s_mov_b32 s10, 0x976a7376
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s2, v3
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s10, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s8, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[2:3]
+; GCN-NEXT:    v_mov_b32_e32 v6, s11
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v6, v2, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s10, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s8, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = udiv i64 %x, 1235195949943
   store i64 %r, i64 addrspace(1)* %out
@@ -5619,6 +5619,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
 ; GCN-NEXT:    s_lshr_b64 s[2:3], s[8:9], 12
+; GCN-NEXT:    s_movk_i32 s8, 0xffe
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v6, v2, vcc
@@ -5633,25 +5634,24 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
 ; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s0, v4
 ; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
-; GCN-NEXT:    s_movk_i32 s0, 0xffe
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v3
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s8, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, -1, v3, vcc
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v4
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v1, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v7, v5, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s8, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
+; GCN-NEXT:    v_cndmask_b32_e32 v2, -1, v4, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v8, v6, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v1, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
@@ -5713,120 +5713,120 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    s_movk_i32 s2, 0xfee0
 ; GCN-NEXT:    s_mov_b32 s3, 0x689e0837
-; GCN-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s4, s8
+; GCN-NEXT:    s_movk_i32 s8, 0x11f
 ; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
 ; GCN-NEXT:    v_mul_hi_u32 v3, v0, s3
 ; GCN-NEXT:    v_mul_lo_u32 v4, v1, s3
-; GCN-NEXT:    s_movk_i32 s12, 0x11f
-; GCN-NEXT:    s_mov_b32 s13, 0x9761f7c9
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, s3
+; GCN-NEXT:    s_mov_b32 s12, 0x9761f7c9
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, v0, s3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v4, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v9, v1, v2
+; GCN-NEXT:    v_mul_lo_u32 v3, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v4, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v6, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, v1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v3, v1, v3
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
-; GCN-NEXT:    s_movk_i32 s5, 0x11e
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v4, v3, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
+; GCN-NEXT:    v_mul_lo_u32 v6, v1, v5
+; GCN-NEXT:    v_mul_hi_u32 v5, v1, v5
+; GCN-NEXT:    s_mov_b32 s5, s9
+; GCN-NEXT:    s_movk_i32 s9, 0x11e
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v0, s2
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, s3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, s2
+; GCN-NEXT:    v_mul_hi_u32 v7, v0, s3
 ; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v6, v2, s3
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, s3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v10, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v9, v0, v5
-; GCN-NEXT:    v_mul_hi_u32 v11, v2, v4
-; GCN-NEXT:    s_mov_b32 s4, 0x9761f7c8
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
-; GCN-NEXT:    v_addc_u32_e32 v9, vcc, v8, v10, vcc
-; GCN-NEXT:    v_mul_lo_u32 v10, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v5, v2, v5
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, v4
-; GCN-NEXT:    s_mov_b32 s10, -1
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v10
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v11, v7, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
+; GCN-NEXT:    v_mul_lo_u32 v8, v2, s3
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; GCN-NEXT:    v_mul_lo_u32 v7, v0, s3
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
+; GCN-NEXT:    v_mul_lo_u32 v8, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v10, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v9, v0, v7
+; GCN-NEXT:    v_mul_hi_u32 v11, v2, v5
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
+; GCN-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
+; GCN-NEXT:    v_mul_lo_u32 v10, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
+; GCN-NEXT:    v_add_i32_e32 v8, vcc, v8, v10
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v9, v7, vcc
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v11, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v4, s[0:1]
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s6, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s6, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s6, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s7, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s7, v1
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s7, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s12
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s13
-; GCN-NEXT:    v_mul_lo_u32 v1, v1, s13
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s13
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s12
+; GCN-NEXT:    v_mul_lo_u32 v1, v1, s12
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s12
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s7, v1
-; GCN-NEXT:    v_mov_b32_e32 v3, s12
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s13, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[2:3], s5, v5
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[2:3], s4, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s13, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s12, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v5, s7
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s5, v1
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], s10, v0
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s11, v1
+; GCN-NEXT:    v_mov_b32_e32 v3, s8
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[2:3], s12, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v5, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v5
+; GCN-NEXT:    s_mov_b32 s10, 0x9761f7c8
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s10, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s8, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s12, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
+; GCN-NEXT:    v_mov_b32_e32 v5, s11
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, v5, v1, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v0
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s10, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s12, v1
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s8, v1
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = urem i64 %x, 1235195393993
   store i64 %r, i64 addrspace(1)* %out
@@ -6059,30 +6059,30 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v3, s3, v0
 ; GCN-NEXT:    v_mul_lo_u32 v4, v0, s3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s0, v4
 ; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s0, v4
 ; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
 ; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s3, v4
 ; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
-; GCN-NEXT:    s_mov_b32 s0, 0x12d8fa
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v3
+; GCN-NEXT:    s_mov_b32 s3, 0x12d8fa
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s3, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, -1, v3, vcc
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v4
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v7, v5, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s3, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
+; GCN-NEXT:    v_cndmask_b32_e32 v2, -1, v4, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v8, v6, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v0, s2, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s2, v1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -6143,26 +6143,28 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_add_u32 s2, s2, s12
 ; GCN-NEXT:    s_mov_b32 s13, s12
 ; GCN-NEXT:    s_addc_u32 s3, s3, s12
-; GCN-NEXT:    s_xor_b64 s[2:3], s[2:3], s[12:13]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GCN-NEXT:    s_sub_u32 s4, 0, s2
-; GCN-NEXT:    s_subb_u32 s5, 0, s3
-; GCN-NEXT:    s_ashr_i32 s14, s11, 31
+; GCN-NEXT:    s_xor_b64 s[14:15], s[2:3], s[12:13]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s14
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s15
+; GCN-NEXT:    s_sub_u32 s2, 0, s14
+; GCN-NEXT:    s_subb_u32 s3, 0, s15
+; GCN-NEXT:    s_ashr_i32 s16, s11, 31
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_mov_b32 s15, s14
+; GCN-NEXT:    s_mov_b32 s17, s16
 ; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_mov_b32 s4, s8
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_lo_u32 v5, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s4, v0
+; GCN-NEXT:    s_mov_b32 s5, s9
+; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
+; GCN-NEXT:    v_mul_lo_u32 v5, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s2, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
 ; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
@@ -6183,12 +6185,11 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v5, s4, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s9
+; GCN-NEXT:    v_mul_lo_u32 v5, s2, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v8, s3, v0
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; GCN-NEXT:    v_mul_lo_u32 v7, s4, v0
+; GCN-NEXT:    v_mul_lo_u32 v7, s2, v0
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
 ; GCN-NEXT:    v_mul_lo_u32 v10, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v12, v0, v5
@@ -6206,10 +6207,10 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
-; GCN-NEXT:    s_add_u32 s0, s10, s14
-; GCN-NEXT:    s_addc_u32 s1, s11, s14
+; GCN-NEXT:    s_add_u32 s0, s10, s16
+; GCN-NEXT:    s_addc_u32 s1, s11, s16
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[14:15]
+; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[16:17]
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
 ; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
@@ -6220,48 +6221,47 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    s_mov_b32 s4, s8
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s3, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-NEXT:    v_mul_lo_u32 v2, s14, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s14, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s15, v0
+; GCN-NEXT:    v_mov_b32_e32 v5, s15
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v3, s14, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s11, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s10, v3
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s2, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
+; GCN-NEXT:    v_sub_i32_e64 v3, s[0:1], s10, v3
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s14, v3
+; GCN-NEXT:    v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s15, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s14, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s15, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v6, s11
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v6, v2, s[0:1]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s15, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v3
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s14, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s15, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    s_xor_b64 s[0:1], s[14:15], s[12:13]
+; GCN-NEXT:    s_xor_b64 s[0:1], s[16:17], s[12:13]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s1, v1
@@ -6419,30 +6419,30 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_mul_hi_u32 v3, s9, v0
 ; GCN-NEXT:    v_mul_lo_u32 v4, v0, s9
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s0, v4
 ; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s0, v4
 ; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
 ; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s9, v4
 ; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
-; GCN-NEXT:    s_movk_i32 s0, 0xffe
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v3
+; GCN-NEXT:    s_movk_i32 s9, 0xffe
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, -1, v3, vcc
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v4
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v7, v5, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s9, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
+; GCN-NEXT:    v_cndmask_b32_e32 v2, -1, v4, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v8, v6, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s8, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s8, v1
@@ -6476,8 +6476,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x11
 ; GCN-NEXT:    s_mov_b32 s3, 0
 ; GCN-NEXT:    s_movk_i32 s2, 0x1000
-; GCN-NEXT:    s_mov_b32 s18, 0x4f800000
-; GCN-NEXT:    s_mov_b32 s19, 0x5f7ffffc
+; GCN-NEXT:    s_mov_b32 s20, 0x4f800000
+; GCN-NEXT:    s_mov_b32 s21, 0x5f7ffffc
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_lshl_b64 s[12:13], s[2:3], s6
 ; GCN-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
@@ -6488,24 +6488,28 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    s_xor_b64 s[14:15], s[2:3], s[16:17]
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s14
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s15
-; GCN-NEXT:    s_mov_b32 s20, 0x2f800000
-; GCN-NEXT:    s_mov_b32 s21, 0xcf800000
+; GCN-NEXT:    s_mov_b32 s22, 0x2f800000
+; GCN-NEXT:    s_mov_b32 s23, 0xcf800000
 ; GCN-NEXT:    s_sub_u32 s6, 0, s14
-; GCN-NEXT:    v_mac_f32_e32 v0, s18, v1
+; GCN-NEXT:    v_mac_f32_e32 v0, s20, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    s_subb_u32 s7, 0, s15
 ; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
-; GCN-NEXT:    v_mul_f32_e32 v0, s19, v0
-; GCN-NEXT:    v_mul_f32_e32 v1, s20, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, s21, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, s22, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
-; GCN-NEXT:    v_mac_f32_e32 v0, s21, v1
+; GCN-NEXT:    v_mac_f32_e32 v0, s23, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_ashr_i32 s18, s9, 31
+; GCN-NEXT:    s_add_u32 s0, s8, s18
 ; GCN-NEXT:    v_mul_hi_u32 v3, s6, v0
 ; GCN-NEXT:    v_mul_lo_u32 v2, s6, v1
 ; GCN-NEXT:    v_mul_lo_u32 v4, s7, v0
 ; GCN-NEXT:    v_mul_lo_u32 v5, s6, v0
+; GCN-NEXT:    s_mov_b32 s19, s18
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_mul_lo_u32 v3, v0, v2
@@ -6517,6 +6521,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v6, v1, v5
 ; GCN-NEXT:    v_mul_hi_u32 v5, v1, v5
+; GCN-NEXT:    s_addc_u32 s1, s9, s18
+; GCN-NEXT:    s_xor_b64 s[8:9], s[0:1], s[18:19]
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v4, 0
@@ -6549,13 +6555,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[2:3]
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s2, s9, 31
-; GCN-NEXT:    s_add_u32 s0, s8, s2
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    s_mov_b32 s3, s2
-; GCN-NEXT:    s_addc_u32 s1, s9, s2
-; GCN-NEXT:    s_xor_b64 s[8:9], s[0:1], s[2:3]
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v2, s8, v1
 ; GCN-NEXT:    v_mul_hi_u32 v3, s8, v0
@@ -6566,7 +6566,6 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v5, s9, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s9, v0
-; GCN-NEXT:    s_xor_b64 s[2:3], s[2:3], s[16:17]
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
@@ -6581,68 +6580,71 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_mul_lo_u32 v3, s14, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
 ; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s9, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s8, v3
-; GCN-NEXT:    v_subb_u32_e64 v5, s[0:1], v5, v7, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v7, s[0:1], s14, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[0:1], 0, v5, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s15, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s14, v7
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s15, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, v8, v7, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v9, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v10, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    s_ashr_i32 s8, s13, 31
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
-; GCN-NEXT:    s_add_u32 s12, s12, s8
-; GCN-NEXT:    v_cndmask_b32_e64 v5, v10, v8, s[0:1]
+; GCN-NEXT:    v_sub_i32_e64 v3, s[0:1], s8, v3
+; GCN-NEXT:    v_subb_u32_e64 v5, vcc, v5, v7, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v7, vcc, s14, v3
+; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s15, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s14, v7
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s15, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v5, v8, v7, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v9, vcc, 1, v0
+; GCN-NEXT:    v_addc_u32_e32 v10, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, v10, v8, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v8, s9
-; GCN-NEXT:    s_mov_b32 s9, s8
-; GCN-NEXT:    s_addc_u32 s13, s13, s8
-; GCN-NEXT:    s_xor_b64 s[12:13], s[12:13], s[8:9]
+; GCN-NEXT:    s_xor_b64 s[8:9], s[18:19], s[16:17]
+; GCN-NEXT:    s_ashr_i32 s16, s13, 31
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v8, v2, s[0:1]
+; GCN-NEXT:    s_add_u32 s0, s12, s16
+; GCN-NEXT:    s_mov_b32 s17, s16
+; GCN-NEXT:    s_addc_u32 s1, s13, s16
+; GCN-NEXT:    s_xor_b64 s[12:13], s[0:1], s[16:17]
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v10, s12
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v11, s13
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v8, v2, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s15, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s14, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s15, v2
-; GCN-NEXT:    v_mac_f32_e32 v10, s18, v11
+; GCN-NEXT:    v_mac_f32_e32 v10, s20, v11
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v8, v3, vcc
 ; GCN-NEXT:    v_rcp_f32_e32 v3, v10
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; GCN-NEXT:    s_sub_u32 s14, 0, s12
-; GCN-NEXT:    v_mul_f32_e32 v3, s19, v3
-; GCN-NEXT:    v_mul_f32_e32 v5, s20, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v9, v7, s[2:3]
+; GCN-NEXT:    v_mul_f32_e32 v3, s21, v3
+; GCN-NEXT:    v_mul_f32_e32 v5, s22, v3
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
-; GCN-NEXT:    v_mac_f32_e32 v3, s21, v5
+; GCN-NEXT:    v_mac_f32_e32 v3, s23, v5
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v5, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v9, v7, s[0:1]
+; GCN-NEXT:    s_sub_u32 s2, 0, s12
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_mul_hi_u32 v2, s14, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, s14, v5
-; GCN-NEXT:    s_subb_u32 s15, 0, s13
-; GCN-NEXT:    v_mul_lo_u32 v8, s15, v3
-; GCN-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GCN-NEXT:    v_mul_hi_u32 v2, s2, v3
+; GCN-NEXT:    v_mul_lo_u32 v7, s2, v5
+; GCN-NEXT:    s_subb_u32 s3, 0, s13
+; GCN-NEXT:    v_mul_lo_u32 v8, s3, v3
+; GCN-NEXT:    s_ashr_i32 s14, s11, 31
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v7
-; GCN-NEXT:    v_mul_lo_u32 v7, s14, v3
+; GCN-NEXT:    v_mul_lo_u32 v7, s2, v3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
 ; GCN-NEXT:    v_mul_lo_u32 v8, v3, v2
 ; GCN-NEXT:    v_mul_hi_u32 v10, v3, v2
 ; GCN-NEXT:    v_mul_hi_u32 v9, v3, v7
 ; GCN-NEXT:    v_mul_hi_u32 v11, v5, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v5, v2
-; GCN-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GCN-NEXT:    s_mov_b32 s15, s14
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
 ; GCN-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v10, v5, v7
 ; GCN-NEXT:    v_mul_hi_u32 v7, v5, v7
+; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
+; GCN-NEXT:    v_xor_b32_e32 v1, s9, v1
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, v10, v8
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v9, v7, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, v11, v4, vcc
@@ -6650,11 +6652,11 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_add_i32_e64 v2, s[0:1], v3, v2
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v6, v8, vcc
 ; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v5, v7, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v8, s14, v3
-; GCN-NEXT:    v_mul_hi_u32 v9, s14, v2
-; GCN-NEXT:    v_mul_lo_u32 v10, s15, v2
+; GCN-NEXT:    v_mul_lo_u32 v8, s2, v3
+; GCN-NEXT:    v_mul_hi_u32 v9, s2, v2
+; GCN-NEXT:    v_mul_lo_u32 v10, s3, v2
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
-; GCN-NEXT:    v_mul_lo_u32 v9, s14, v2
+; GCN-NEXT:    v_mul_lo_u32 v9, s2, v2
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, v10, v8
 ; GCN-NEXT:    v_mul_lo_u32 v12, v2, v8
 ; GCN-NEXT:    v_mul_hi_u32 v14, v2, v8
@@ -6671,11 +6673,9 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v9, v3
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, v6, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; GCN-NEXT:    s_ashr_i32 s14, s11, 31
 ; GCN-NEXT:    v_addc_u32_e64 v5, vcc, v5, v8, s[0:1]
 ; GCN-NEXT:    s_add_u32 s0, s10, s14
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    s_mov_b32 s15, s14
 ; GCN-NEXT:    s_addc_u32 s1, s11, s14
 ; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[14:15]
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
@@ -6688,7 +6688,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v9, s11, v2
 ; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
-; GCN-NEXT:    v_mov_b32_e32 v8, s3
+; GCN-NEXT:    v_mov_b32_e32 v8, s9
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v2, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v10, v4, vcc
@@ -6696,32 +6696,32 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v4, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v4, s12, v3
 ; GCN-NEXT:    v_mul_hi_u32 v5, s12, v2
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
 ; GCN-NEXT:    v_mul_lo_u32 v6, s13, v2
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_mul_lo_u32 v5, s12, v2
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
 ; GCN-NEXT:    v_sub_i32_e32 v6, vcc, s11, v4
 ; GCN-NEXT:    v_mov_b32_e32 v7, s13
-; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s10, v5
-; GCN-NEXT:    v_subb_u32_e64 v6, s[0:1], v6, v7, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v7, s[0:1], s12, v5
-; GCN-NEXT:    v_subbrev_u32_e64 v6, s[0:1], 0, v6, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s12, v7
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s13, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v8, v7, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v2
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v3, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v9, s[0:1], 1, v2
-; GCN-NEXT:    v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v10, v8, s[0:1]
+; GCN-NEXT:    v_sub_i32_e64 v5, s[0:1], s10, v5
+; GCN-NEXT:    v_subb_u32_e64 v6, vcc, v6, v7, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v7, vcc, s12, v5
+; GCN-NEXT:    v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v7
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v6
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v8, v7, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 2, v2
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v9, vcc, 1, v2
+; GCN-NEXT:    v_addc_u32_e32 v10, vcc, 0, v3, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v6, v10, v8, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v8, s11
-; GCN-NEXT:    v_subb_u32_e32 v4, vcc, v8, v4, vcc
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v8, v4, s[0:1]
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v5
@@ -6729,9 +6729,9 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v4
 ; GCN-NEXT:    v_cndmask_b32_e32 v4, v8, v5, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v9, v7, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v9, v7, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GCN-NEXT:    s_xor_b64 s[0:1], s[14:15], s[8:9]
+; GCN-NEXT:    s_xor_b64 s[0:1], s[14:15], s[16:17]
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v2, s0, v2
 ; GCN-NEXT:    v_xor_b32_e32 v3, s1, v3
@@ -6840,28 +6840,28 @@ define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_lo_u32 v1, v1, s3
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s3
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
 ; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s3, v0
 ; GCN-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_subrev_i32_e32 v4, vcc, s3, v2
 ; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v3, vcc
-; GCN-NEXT:    s_mov_b32 s0, 0x12d8fa
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v2
+; GCN-NEXT:    s_mov_b32 s3, 0x12d8fa
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s3, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
 ; GCN-NEXT:    v_cndmask_b32_e32 v6, -1, v6, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v5, -1, v5, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s3, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v4, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v0, s2, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s2, v1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -7013,25 +7013,25 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_mul_lo_u32 v0, s12, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], s10, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s11, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s10, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[2:3], s12, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v5, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s12, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v5, s11
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, v5, v1, s[0:1]
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
@@ -7040,7 +7040,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v0, s14, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s14, v1
@@ -7220,43 +7220,43 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_mul_lo_u32 v0, s16, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], s8, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s9, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s17
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s16, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v7, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s17, v7
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s16, v5
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s16, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s17, v7
-; GCN-NEXT:    v_cndmask_b32_e64 v8, v8, v9, s[2:3]
-; GCN-NEXT:    s_ashr_i32 s2, s15, 31
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v8
-; GCN-NEXT:    s_add_u32 s8, s14, s2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v2, s[0:1]
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v5, s[2:3], s16, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v7, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s17, v7
+; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s16, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s17, v7
+; GCN-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s16, v5
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v8
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v2, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v7, s9
-; GCN-NEXT:    s_mov_b32 s3, s2
-; GCN-NEXT:    s_addc_u32 s9, s15, s2
-; GCN-NEXT:    s_xor_b64 s[8:9], s[8:9], s[2:3]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v8, s8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v9, s9
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v7, v1, vcc
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, v7, v1, s[0:1]
+; GCN-NEXT:    s_ashr_i32 s0, s15, 31
+; GCN-NEXT:    s_add_u32 s8, s14, s0
+; GCN-NEXT:    s_mov_b32 s1, s0
+; GCN-NEXT:    s_addc_u32 s9, s15, s0
+; GCN-NEXT:    s_xor_b64 s[8:9], s[8:9], s[0:1]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v9, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v10, s9
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s17, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
-; GCN-NEXT:    v_mac_f32_e32 v8, s18, v9
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s16, v0
-; GCN-NEXT:    v_rcp_f32_e32 v8, v8
-; GCN-NEXT:    v_cndmask_b32_e64 v10, 0, -1, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s17, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v7, v7, v10, vcc
+; GCN-NEXT:    v_mac_f32_e32 v9, s18, v10
+; GCN-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc
+; GCN-NEXT:    v_rcp_f32_e32 v8, v9
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v3, s[2:3]
 ; GCN-NEXT:    v_mul_f32_e32 v3, s19, v8
 ; GCN-NEXT:    v_mul_f32_e32 v5, s20, v3
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
@@ -7337,30 +7337,30 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_mul_lo_u32 v3, s8, v3
 ; GCN-NEXT:    v_mul_hi_u32 v4, s8, v2
 ; GCN-NEXT:    v_mul_lo_u32 v5, s9, v2
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
 ; GCN-NEXT:    v_mul_lo_u32 v2, s8, v2
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_sub_i32_e64 v2, s[0:1], s10, v2
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s11, v3
 ; GCN-NEXT:    v_mov_b32_e32 v5, s9
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s10, v2
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v6, s[0:1], s8, v2
-; GCN-NEXT:    v_subbrev_u32_e64 v7, s[2:3], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v7
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v6
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s8, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s9, v7
-; GCN-NEXT:    v_cndmask_b32_e64 v8, v8, v9, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v8
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v7, v4, s[0:1]
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v6, s[2:3], s8, v2
+; GCN-NEXT:    v_subbrev_u32_e64 v7, vcc, 0, v4, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v7
+; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v7
+; GCN-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s8, v6
+; GCN-NEXT:    v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v8
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v7, v4, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v7, s11
-; GCN-NEXT:    v_subb_u32_e32 v3, vcc, v7, v3, vcc
+; GCN-NEXT:    v_subb_u32_e64 v3, vcc, v7, v3, s[0:1]
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v2
@@ -7369,7 +7369,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v2, s14, v2
 ; GCN-NEXT:    v_xor_b32_e32 v3, s14, v3

diff  --git a/llvm/test/CodeGen/AMDGPU/ctlz.ll b/llvm/test/CodeGen/AMDGPU/ctlz.ll
index dc8d630bc7d4..c26af864e7fe 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz.ll
@@ -25,10 +25,10 @@ define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val)
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_flbit_i32_b32 s0, s2
+; SI-NEXT:    s_cmp_lg_u32 s2, 0
+; SI-NEXT:    s_cselect_b32 s0, s0, 32
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s2, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -40,9 +40,9 @@ define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val)
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_flbit_i32_b32 s1, s0
-; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
+; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    s_cselect_b32 s0, s1, 32
+; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
 ;
@@ -384,14 +384,13 @@ define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32],
 ; SI-NEXT:    s_flbit_i32_b32 s0, s2
 ; SI-NEXT:    s_flbit_i32_b32 s1, s3
 ; SI-NEXT:    s_add_i32 s0, s0, 32
-; SI-NEXT:    s_or_b32 s2, s2, s3
-; SI-NEXT:    v_mov_b32_e32 v0, s1
-; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s2, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
+; SI-NEXT:    s_cmp_eq_u32 s3, 0
+; SI-NEXT:    s_cselect_b32 s0, s0, s1
+; SI-NEXT:    s_or_b32 s1, s2, s3
+; SI-NEXT:    s_cmp_lg_u32 s1, 0
+; SI-NEXT:    s_cselect_b32 s0, s0, 64
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -401,18 +400,17 @@ define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32],
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x4c
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    v_mov_b32_e32 v1, 0
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_flbit_i32_b32 s2, s0
-; VI-NEXT:    s_flbit_i32_b32 s3, s1
 ; VI-NEXT:    s_add_i32 s2, s2, 32
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; VI-NEXT:    s_flbit_i32_b32 s3, s1
+; VI-NEXT:    s_cmp_eq_u32 s1, 0
+; VI-NEXT:    s_cselect_b32 s2, s2, s3
 ; VI-NEXT:    s_or_b32 s0, s0, s1
-; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    s_cselect_b32 s0, s2, 64
+; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
 ;
@@ -444,18 +442,17 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64
 ; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
-; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_flbit_i32_b32 s0, s2
 ; SI-NEXT:    s_flbit_i32_b32 s1, s3
 ; SI-NEXT:    s_add_i32 s0, s0, 32
-; SI-NEXT:    s_or_b32 s2, s2, s3
-; SI-NEXT:    v_mov_b32_e32 v0, s1
-; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s2, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
+; SI-NEXT:    s_cmp_eq_u32 s3, 0
+; SI-NEXT:    s_cselect_b32 s0, s0, s1
+; SI-NEXT:    s_or_b32 s1, s2, s3
+; SI-NEXT:    s_cmp_lg_u32 s1, 0
+; SI-NEXT:    s_cselect_b32 s0, s0, 64
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -467,15 +464,14 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_flbit_i32_b32 s2, s0
-; VI-NEXT:    s_flbit_i32_b32 s3, s1
 ; VI-NEXT:    s_add_i32 s2, s2, 32
-; VI-NEXT:    v_mov_b32_e32 v0, s3
-; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; VI-NEXT:    s_flbit_i32_b32 s3, s1
+; VI-NEXT:    s_cmp_eq_u32 s1, 0
+; VI-NEXT:    s_cselect_b32 s2, s2, s3
 ; VI-NEXT:    s_or_b32 s0, s0, s1
-; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
+; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    s_cselect_b32 s0, s2, 64
+; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
 ;

diff  --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 081cb188cfe0..a34fb5c503d6 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -99,14 +99,13 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(i8 addrspace(1)* noalias %out, i
 
 ; FUNC-LABEL: {{^}}s_ctlz_zero_undef_i64:
 ; GCN: s_load_dwordx2 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
-; GCN-DAG: v_cmp_eq_u32_e64 vcc, s[[HI]], 0{{$}}
+; GCN-DAG: s_cmp_eq_u32 s[[HI]], 0{{$}}
 ; GCN-DAG: s_flbit_i32_b32 [[FFBH_LO:s[0-9]+]], s[[LO]]
 ; GCN-DAG: s_add_i32 [[ADD:s[0-9]+]], [[FFBH_LO]], 32
 ; GCN-DAG: s_flbit_i32_b32 [[FFBH_HI:s[0-9]+]], s[[HI]]
-; GCN-DAG: v_mov_b32_e32 [[VFFBH_LO:v[0-9]+]], [[ADD]]
-; GCN-DAG: v_mov_b32_e32 [[VFFBH_HI:v[0-9]+]], [[FFBH_HI]]
-; GCN-DAG: v_cndmask_b32_e32 v[[CTLZ:[0-9]+]], [[VFFBH_HI]], [[VFFBH_LO]]
+; GCN-DAG: s_cselect_b32 [[RES:s[0-9]+]], [[ADD]], [[FFBH_HI]]
 ; GCN-DAG: v_mov_b32_e32 v[[CTLZ_HI:[0-9]+]], 0{{$}}
+; GCN-DAG: v_mov_b32_e32 v[[CTLZ:[0-9]+]], [[RES]]
 ; GCN: {{buffer|flat}}_store_dwordx2 v{{\[}}[[CTLZ]]:[[CTLZ_HI]]{{\]}}
 define amdgpu_kernel void @s_ctlz_zero_undef_i64(i64 addrspace(1)* noalias %out, [8 x i32], i64 %val) nounwind {
   %ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 true)

diff  --git a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll
index cbcb29f1e83a..7b4d6f8d2f92 100644
--- a/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/cttz_zero_undef.ll
@@ -156,8 +156,8 @@ define amdgpu_kernel void @v_cttz_zero_undef_i16_with_select(i16 addrspace(1)* n
 }
 
 ; FUNC-LABEL: {{^}}v_cttz_zero_undef_i32_with_select:
-; SI: v_ffbl_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_ne_u32_e32 vcc, 0
+; SI-DAG: v_ffbl_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
+; SI-DAG: v_cmp_ne_u32_e32 vcc, 0
 ; EG: MEM_RAT_CACHELESS STORE_RAW [[RESULT:T[0-9]+\.[XYZW]]]
 define amdgpu_kernel void @v_cttz_zero_undef_i32_with_select(i32 addrspace(1)* noalias %out, i32 addrspace(1)* nocapture readonly %arrayidx) nounwind {
   %val = load i32, i32 addrspace(1)* %arrayidx, align 1

diff  --git a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
index 4e4496859696..e9233632c9da 100644
--- a/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
+++ b/llvm/test/CodeGen/AMDGPU/dagcombine-select.ll
@@ -1,9 +1,10 @@
 ; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
 
 ; GCN-LABEL: {{^}}select_and1:
-; GCN:     v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, v{{[0-9]+}},
+; GCN:     s_cselect_b32 [[SEL:s[0-9]+]], s{{[0-9]+}},
+; GCN:     v_mov_b32_e32 [[VSEL:v[0-9]+]], [[SEL]]
 ; GCN-NOT: v_and_b32
-; GCN:     store_dword v[{{[0-9:]+}}], [[SEL]],
+; GCN:     store_dword v[{{[0-9:]+}}], [[VSEL]],
 define amdgpu_kernel void @select_and1(i32 addrspace(1)* %p, i32 %x, i32 %y) {
   %c = icmp slt i32 %x, 11
   %s = select i1 %c, i32 0, i32 -1
@@ -13,9 +14,10 @@ define amdgpu_kernel void @select_and1(i32 addrspace(1)* %p, i32 %x, i32 %y) {
 }
 
 ; GCN-LABEL: {{^}}select_and2:
-; GCN:     v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, v{{[0-9]+}},
+; GCN:     s_cselect_b32 [[SEL:s[0-9]+]], s{{[0-9]+}},
+; GCN:     v_mov_b32_e32 [[VSEL:v[0-9]+]], [[SEL]]
 ; GCN-NOT: v_and_b32
-; GCN:     store_dword v[{{[0-9:]+}}], [[SEL]],
+; GCN:     store_dword v[{{[0-9:]+}}], [[VSEL]],
 define amdgpu_kernel void @select_and2(i32 addrspace(1)* %p, i32 %x, i32 %y) {
   %c = icmp slt i32 %x, 11
   %s = select i1 %c, i32 0, i32 -1
@@ -25,9 +27,10 @@ define amdgpu_kernel void @select_and2(i32 addrspace(1)* %p, i32 %x, i32 %y) {
 }
 
 ; GCN-LABEL: {{^}}select_and3:
-; GCN:     v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, v{{[0-9]+}},
+; GCN:     s_cselect_b32 [[SEL:s[0-9]+]], s{{[0-9]+}},
+; GCN:     v_mov_b32_e32 [[VSEL:v[0-9]+]], [[SEL]]
 ; GCN-NOT: v_and_b32
-; GCN:     store_dword v[{{[0-9:]+}}], [[SEL]],
+; GCN:     store_dword v[{{[0-9:]+}}], [[VSEL]],
 define amdgpu_kernel void @select_and3(i32 addrspace(1)* %p, i32 %x, i32 %y) {
   %c = icmp slt i32 %x, 11
   %s = select i1 %c, i32 -1, i32 0
@@ -52,9 +55,10 @@ define amdgpu_kernel void @select_and_v4(<4 x i32> addrspace(1)* %p, i32 %x, <4
 }
 
 ; GCN-LABEL: {{^}}select_or1:
-; GCN:     v_cndmask_b32_e32 [[SEL:v[0-9]+]], -1, v{{[0-9]+}},
+; GCN:     s_cselect_b32 [[SEL:s[0-9]+]], s{{[0-9]+}},
+; GCN:     v_mov_b32_e32 [[VSEL:v[0-9]+]], [[SEL]]
 ; GCN-NOT: v_or_b32
-; GCN:     store_dword v[{{[0-9:]+}}], [[SEL]],
+; GCN:     store_dword v[{{[0-9:]+}}], [[VSEL]],
 define amdgpu_kernel void @select_or1(i32 addrspace(1)* %p, i32 %x, i32 %y) {
   %c = icmp slt i32 %x, 11
   %s = select i1 %c, i32 0, i32 -1
@@ -64,9 +68,10 @@ define amdgpu_kernel void @select_or1(i32 addrspace(1)* %p, i32 %x, i32 %y) {
 }
 
 ; GCN-LABEL: {{^}}select_or2:
-; GCN:     v_cndmask_b32_e32 [[SEL:v[0-9]+]], -1, v{{[0-9]+}},
+; GCN:     s_cselect_b32 [[SEL:s[0-9]+]], s{{[0-9]+}},
+; GCN:     v_mov_b32_e32 [[VSEL:v[0-9]+]], [[SEL]]
 ; GCN-NOT: v_or_b32
-; GCN:     store_dword v[{{[0-9:]+}}], [[SEL]],
+; GCN:     store_dword v[{{[0-9:]+}}], [[VSEL]],
 define amdgpu_kernel void @select_or2(i32 addrspace(1)* %p, i32 %x, i32 %y) {
   %c = icmp slt i32 %x, 11
   %s = select i1 %c, i32 0, i32 -1
@@ -76,9 +81,10 @@ define amdgpu_kernel void @select_or2(i32 addrspace(1)* %p, i32 %x, i32 %y) {
 }
 
 ; GCN-LABEL: {{^}}select_or3:
-; GCN:     v_cndmask_b32_e32 [[SEL:v[0-9]+]], -1, v{{[0-9]+}},
+; GCN:     s_cselect_b32 [[SEL:s[0-9]+]], s{{[0-9]+}},
+; GCN:     v_mov_b32_e32 [[VSEL:v[0-9]+]], [[SEL]]
 ; GCN-NOT: v_or_b32
-; GCN:     store_dword v[{{[0-9:]+}}], [[SEL]],
+; GCN:     store_dword v[{{[0-9:]+}}], [[VSEL]],
 define amdgpu_kernel void @select_or3(i32 addrspace(1)* %p, i32 %x, i32 %y) {
   %c = icmp slt i32 %x, 11
   %s = select i1 %c, i32 -1, i32 0
@@ -103,7 +109,7 @@ define amdgpu_kernel void @select_or_v4(<4 x i32> addrspace(1)* %p, i32 %x, <4 x
 }
 
 ; GCN-LABEL: {{^}}sel_constants_sub_constant_sel_constants:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 9,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 9, 2
 define amdgpu_kernel void @sel_constants_sub_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i32 -4, i32 3
   %bo = sub i32 5, %sel
@@ -131,9 +137,8 @@ define amdgpu_kernel void @sel_constants_sub_constant_sel_constants_i16_neg(i16
 }
 
 ; GCN-LABEL: {{^}}sel_constants_sub_constant_sel_constants_v2i16:
-; GCN-DAG: v_mov_b32_e32 [[F:v[0-9]+]], 0x60002
-; GCN-DAG: v_mov_b32_e32 [[T:v[0-9]+]], 0x50009
-; GCN:     v_cndmask_b32_e32 v{{[0-9]+}}, [[F]], [[T]],
+; GCN-DAG: s_mov_b32 [[T:s[0-9]+]], 0x50009
+; GCN:     s_cselect_b32 s{{[0-9]+}}, [[T]], 0x60002
 define amdgpu_kernel void @sel_constants_sub_constant_sel_constants_v2i16(<2 x i16> addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, <2 x i16> <i16 -4, i16 2>, <2 x i16> <i16 3, i16 1>
   %bo = sub <2 x i16> <i16 5, i16 7>, %sel
@@ -154,7 +159,7 @@ define amdgpu_kernel void @sel_constants_sub_constant_sel_constants_v4i32(<4 x i
 }
 
 ; GCN-LABEL: {{^}}sdiv_constant_sel_constants_i64:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 0, 5
 define amdgpu_kernel void @sdiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 121, i64 23
   %bo = sdiv i64 120, %sel
@@ -163,7 +168,7 @@ define amdgpu_kernel void @sdiv_constant_sel_constants_i64(i64 addrspace(1)* %p,
 }
 
 ; GCN-LABEL: {{^}}sdiv_constant_sel_constants_i32:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 8, 26,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 26, 8
 define amdgpu_kernel void @sdiv_constant_sel_constants_i32(i32 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i32 7, i32 23
   %bo = sdiv i32 184, %sel
@@ -172,7 +177,7 @@ define amdgpu_kernel void @sdiv_constant_sel_constants_i32(i32 addrspace(1)* %p,
 }
 
 ; GCN-LABEL: {{^}}udiv_constant_sel_constants_i64:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 5, 0,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 0, 5
 define amdgpu_kernel void @udiv_constant_sel_constants_i64(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 -4, i64 23
   %bo = udiv i64 120, %sel
@@ -181,7 +186,7 @@ define amdgpu_kernel void @udiv_constant_sel_constants_i64(i64 addrspace(1)* %p,
 }
 
 ; GCN-LABEL: {{^}}srem_constant_sel_constants:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 3, 33,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 33, 3
 define amdgpu_kernel void @srem_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 34, i64 15
   %bo = srem i64 33, %sel
@@ -190,7 +195,7 @@ define amdgpu_kernel void @srem_constant_sel_constants(i64 addrspace(1)* %p, i1
 }
 
 ; GCN-LABEL: {{^}}urem_constant_sel_constants:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 3, 33,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 33, 3
 define amdgpu_kernel void @urem_constant_sel_constants(i64 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i64 34, i64 15
   %bo = urem i64 33, %sel
@@ -199,7 +204,7 @@ define amdgpu_kernel void @urem_constant_sel_constants(i64 addrspace(1)* %p, i1
 }
 
 ; GCN-LABEL: {{^}}shl_constant_sel_constants:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 8, 4,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 4, 8
 define amdgpu_kernel void @shl_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i32 2, i32 3
   %bo = shl i32 1, %sel
@@ -208,7 +213,7 @@ define amdgpu_kernel void @shl_constant_sel_constants(i32 addrspace(1)* %p, i1 %
 }
 
 ; GCN-LABEL: {{^}}lshr_constant_sel_constants:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 8, 16,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 16, 8
 define amdgpu_kernel void @lshr_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i32 2, i32 3
   %bo = lshr i32 64, %sel
@@ -217,7 +222,7 @@ define amdgpu_kernel void @lshr_constant_sel_constants(i32 addrspace(1)* %p, i1
 }
 
 ; GCN-LABEL: {{^}}ashr_constant_sel_constants:
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 16, 32,
+; GCN: s_cselect_b32 s{{[0-9]+}}, 32, 16
 define amdgpu_kernel void @ashr_constant_sel_constants(i32 addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, i32 2, i32 3
   %bo = ashr i32 128, %sel
@@ -247,8 +252,7 @@ define amdgpu_kernel void @fsub_constant_sel_constants_f16(half addrspace(1)* %p
 }
 
 ; GCN-LABEL: {{^}}fsub_constant_sel_constants_v2f16:
-; GCN-DAG: v_mov_b32_e32 [[T:v[0-9]+]], 0x45003c00
-; GCN:     v_cndmask_b32_e32 v{{[0-9]+}}, -2.0, [[T]],
+; GCN:     s_cselect_b32 s{{[0-9]+}}, 0x45003c00, -2.0
 define amdgpu_kernel void @fsub_constant_sel_constants_v2f16(<2 x half> addrspace(1)* %p, i1 %cond) {
   %sel = select i1 %cond, <2 x half> <half -2.0, half -3.0>, <2 x half> <half -1.0, half 4.0>
   %bo = fsub <2 x half> <half -1.0, half 2.0>, %sel

diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
index 1ec749059452..a14e2ae48504 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
@@ -18,12 +18,12 @@ entry:
 
 ; GCN-LABEL: {{^}}int4_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
-; GCN-DAG: v_cmp_ne_u32_e64 [[C2:[^,]+]], [[IDX]], 2
-; GCN-DAG: v_cmp_ne_u32_e64 [[C3:[^,]+]], [[IDX]], 3
+; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 2
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX]], 1
+; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1, [[C1]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], 2, [[V1]], [[C2]]
-; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], 4, [[V2]], [[C3]]
+; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], 2, [[V1]], vcc
+; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], 4, [[V2]], vcc
 ; GCN: store_dword v[{{[0-9:]+}}], [[V3]]
 define amdgpu_kernel void @int4_extelt(i32 addrspace(1)* %out, i32 %sel) {
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll b/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
index 1165cd82a299..8dd0f46c9303 100644
--- a/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
@@ -147,23 +147,26 @@ define amdgpu_kernel void @no_extract_volatile_load_dynextract(i32 addrspace(1)*
 ; GCN-LABEL: no_extract_volatile_load_dynextract:
 ; GCN:       ; %bb.0: ; %entry
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s12, s[0:1], 0xd
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, -1
-; GCN-NEXT:    s_load_dword s12, s[0:1], 0xd
 ; GCN-NEXT:    s_mov_b32 s10, s2
 ; GCN-NEXT:    s_mov_b32 s11, s3
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    s_mov_b32 s8, s6
 ; GCN-NEXT:    s_mov_b32 s9, s7
 ; GCN-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; GCN-NEXT:    s_mov_b32 s0, s4
-; GCN-NEXT:    s_mov_b32 s1, s5
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 1
+; GCN-NEXT:    s_cmp_eq_u32 s12, 1
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s12, 2
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 2
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s12, 3
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 3
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/fceil64.ll b/llvm/test/CodeGen/AMDGPU/fceil64.ll
index da852af3f230..f0cd6a1df9ef 100644
--- a/llvm/test/CodeGen/AMDGPU/fceil64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fceil64.ll
@@ -27,7 +27,7 @@ declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
 ; SI-DAG: v_cmp_gt_f64
 ; SI-DAG: v_cmp_lg_f64
 ; SI-DAG: v_cndmask_b32
-; SI: v_cndmask_b32
+; SI: s_cselect_b32
 ; SI: v_add_f64
 ; SI: s_endpgm
 define amdgpu_kernel void @fceil_f64(double addrspace(1)* %out, double %x) {

diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
index 7bc14939624d..22775ec82714 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies.mir
@@ -1,4 +1,4 @@
-# RUN: llc -march=amdgcn -run-pass=si-fix-sgpr-copies -o - %s | FileCheck --check-prefix=GCN %s
+# RUN: llc -march=amdgcn -run-pass=si-fix-sgpr-copies -verify-machineinstrs -o - %s | FileCheck --check-prefix=GCN %s
 
 # GCN-LABEL: name: fix-sgpr-copies
 # GCN: V_ADD_I32_e32
@@ -110,3 +110,19 @@ body:               |
     %3:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %0, %subreg.sub1
     $vgpr3 = COPY %3.sub0
 ...
+
+# Test to ensure that undef SCC gets properly propagated.
+# GCN-LABEL: name: scc_undef
+# GCN: S_CSELECT_B64 -1, 0, implicit undef $scc
+# GCN: V_CNDMASK
+---
+name: scc_undef
+tracksRegLiveness: true
+
+body:               |
+  bb.0:
+  %1:vgpr_32 = IMPLICIT_DEF
+  %2:sreg_32 = S_MOV_B32 1
+  %3:sreg_32 = COPY %1:vgpr_32
+  %4:sreg_32 = S_CSELECT_B32 killed %2:sreg_32, killed %3:sreg_32, implicit undef $scc
+---

diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
index 0f2d16ebcc2c..9f3255f4e500 100644
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -18,11 +18,12 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_sub_i32 s3, 32, s2
 ; SI-NEXT:    v_mov_b32_e32 v0, s1
-; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    s_and_b32 s1, s2, 31
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    s_cmp_eq_u32 s1, 0
 ; SI-NEXT:    v_alignbit_b32 v0, s0, v0, v1
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -35,10 +36,11 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; VI-NEXT:    s_sub_i32 s3, 32, s2
 ; VI-NEXT:    v_mov_b32_e32 v0, s1
 ; VI-NEXT:    s_and_b32 s1, s2, 31
-; VI-NEXT:    v_mov_b32_e32 v2, s3
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    s_cmp_eq_u32 s1, 0
+; VI-NEXT:    v_alignbit_b32 v0, s0, v0, v1
 ; VI-NEXT:    v_mov_b32_e32 v1, s0
-; VI-NEXT:    v_alignbit_b32 v0, s0, v0, v2
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
@@ -53,10 +55,11 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; GFX9-NEXT:    s_sub_i32 s3, 32, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    s_and_b32 s1, s2, 31
-; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX9-NEXT:    v_alignbit_b32 v0, s0, v0, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s0
-; GFX9-NEXT:    v_alignbit_b32 v0, s0, v0, v2
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
@@ -150,19 +153,21 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
 ; SI-NEXT:    s_sub_i32 s10, 32, s1
-; SI-NEXT:    v_mov_b32_e32 v1, s10
 ; SI-NEXT:    s_and_b32 s1, s1, 31
+; SI-NEXT:    v_mov_b32_e32 v1, s10
+; SI-NEXT:    s_cmp_eq_u32 s1, 0
 ; SI-NEXT:    v_alignbit_b32 v0, s3, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    s_sub_i32 s1, 32, s0
-; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; SI-NEXT:    s_and_b32 s0, s0, 31
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; SI-NEXT:    s_cmp_eq_u32 s0, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_mov_b32_e32 v2, s1
 ; SI-NEXT:    v_alignbit_b32 v0, s2, v0, v2
 ; SI-NEXT:    v_mov_b32_e32 v2, s2
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -176,19 +181,21 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
 ; VI-NEXT:    s_sub_i32 s8, 32, s1
-; VI-NEXT:    v_mov_b32_e32 v1, s8
 ; VI-NEXT:    s_and_b32 s1, s1, 31
+; VI-NEXT:    v_mov_b32_e32 v1, s8
+; VI-NEXT:    s_cmp_eq_u32 s1, 0
 ; VI-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    s_sub_i32 s1, 32, s0
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; VI-NEXT:    s_and_b32 s0, s0, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; VI-NEXT:    s_cmp_eq_u32 s0, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s6
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_alignbit_b32 v0, s4, v0, v2
 ; VI-NEXT:    v_mov_b32_e32 v2, s4
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
@@ -204,19 +211,21 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s7
 ; GFX9-NEXT:    s_sub_i32 s8, 32, s1
-; GFX9-NEXT:    v_mov_b32_e32 v1, s8
 ; GFX9-NEXT:    s_and_b32 s1, s1, 31
+; GFX9-NEXT:    v_mov_b32_e32 v1, s8
+; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
 ; GFX9-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX9-NEXT:    s_sub_i32 s1, 32, s0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; GFX9-NEXT:    s_and_b32 s0, s0, 31
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, v2
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s3
@@ -327,35 +336,39 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s15
 ; SI-NEXT:    s_sub_i32 s16, 32, s3
-; SI-NEXT:    v_mov_b32_e32 v1, s16
 ; SI-NEXT:    s_and_b32 s3, s3, 31
+; SI-NEXT:    v_mov_b32_e32 v1, s16
+; SI-NEXT:    s_cmp_eq_u32 s3, 0
 ; SI-NEXT:    v_alignbit_b32 v0, s11, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
 ; SI-NEXT:    s_sub_i32 s3, 32, s2
-; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; SI-NEXT:    s_and_b32 s2, s2, 31
+; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; SI-NEXT:    s_cmp_eq_u32 s2, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s14
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    v_alignbit_b32 v0, s10, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s10
 ; SI-NEXT:    s_sub_i32 s2, 32, s1
-; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; SI-NEXT:    s_and_b32 s1, s1, 31
+; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; SI-NEXT:    s_cmp_eq_u32 s1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s13
 ; SI-NEXT:    v_mov_b32_e32 v1, s2
 ; SI-NEXT:    v_alignbit_b32 v0, s9, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
 ; SI-NEXT:    s_sub_i32 s1, 32, s0
-; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; SI-NEXT:    s_and_b32 s0, s0, 31
+; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; SI-NEXT:    s_cmp_eq_u32 s0, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s12
 ; SI-NEXT:    v_mov_b32_e32 v4, s1
 ; SI-NEXT:    v_alignbit_b32 v0, s8, v0, v4
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -369,35 +382,39 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
 ; VI-NEXT:    s_sub_i32 s14, 32, s3
-; VI-NEXT:    v_mov_b32_e32 v1, s14
 ; VI-NEXT:    s_and_b32 s3, s3, 31
+; VI-NEXT:    v_mov_b32_e32 v1, s14
+; VI-NEXT:    s_cmp_eq_u32 s3, 0
 ; VI-NEXT:    v_alignbit_b32 v0, s7, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s7
 ; VI-NEXT:    s_sub_i32 s3, 32, s2
-; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; VI-NEXT:    s_and_b32 s2, s2, 31
+; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; VI-NEXT:    s_cmp_eq_u32 s2, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
 ; VI-NEXT:    v_alignbit_b32 v0, s6, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s6
 ; VI-NEXT:    s_sub_i32 s2, 32, s1
-; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; VI-NEXT:    s_and_b32 s1, s1, 31
+; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; VI-NEXT:    s_cmp_eq_u32 s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
 ; VI-NEXT:    v_mov_b32_e32 v1, s2
 ; VI-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    s_sub_i32 s1, 32, s0
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; VI-NEXT:    s_and_b32 s0, s0, 31
+; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; VI-NEXT:    s_cmp_eq_u32 s0, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
 ; VI-NEXT:    v_mov_b32_e32 v4, s1
 ; VI-NEXT:    v_alignbit_b32 v0, s4, v0, v4
 ; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s12
 ; VI-NEXT:    v_mov_b32_e32 v5, s13
@@ -413,35 +430,39 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s11
 ; GFX9-NEXT:    s_sub_i32 s14, 32, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s14
 ; GFX9-NEXT:    s_and_b32 s3, s3, 31
+; GFX9-NEXT:    v_mov_b32_e32 v1, s14
+; GFX9-NEXT:    s_cmp_eq_u32 s3, 0
 ; GFX9-NEXT:    v_alignbit_b32 v0, s7, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    s_sub_i32 s3, 32, s2
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GFX9-NEXT:    s_and_b32 s2, s2, 31
+; GFX9-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9-NEXT:    v_alignbit_b32 v0, s6, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX9-NEXT:    s_sub_i32 s2, 32, s1
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GFX9-NEXT:    s_and_b32 s1, s1, 31
+; GFX9-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s9
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX9-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX9-NEXT:    s_sub_i32 s1, 32, s0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; GFX9-NEXT:    s_and_b32 s0, s0, 31
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s1
 ; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, v4
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s4
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s13

diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
index e27cbe73a0bd..2dadb5e80dfd 100644
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -140,14 +140,16 @@ define amdgpu_kernel void @fshr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
 ; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_cmp_eq_u32 s1, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_alignbit_b32 v1, s3, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; SI-NEXT:    s_cmp_eq_u32 s0, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_mov_b32_e32 v2, s0
 ; SI-NEXT:    v_alignbit_b32 v2, s2, v0, v2
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -162,14 +164,16 @@ define amdgpu_kernel void @fshr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
 ; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_cmp_eq_u32 s1, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; VI-NEXT:    s_cmp_eq_u32 s0, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s6
 ; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    v_alignbit_b32 v2, s4, v0, v2
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
@@ -186,14 +190,16 @@ define amdgpu_kernel void @fshr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s7
 ; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX9-NEXT:    v_alignbit_b32 v2, s4, v0, v2
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s3
@@ -303,26 +309,30 @@ define amdgpu_kernel void @fshr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; SI-NEXT:    v_mov_b32_e32 v0, s15
 ; SI-NEXT:    s_and_b32 s3, s3, 31
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_alignbit_b32 v1, s11, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; SI-NEXT:    s_cmp_eq_u32 s3, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    s_and_b32 s2, s2, 31
+; SI-NEXT:    v_alignbit_b32 v1, s11, v0, v1
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v1, v0, vcc
+; SI-NEXT:    s_cmp_eq_u32 s2, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s14
 ; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    v_alignbit_b32 v1, s10, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    s_and_b32 s1, s1, 31
+; SI-NEXT:    v_alignbit_b32 v1, s10, v0, v1
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
+; SI-NEXT:    s_cmp_eq_u32 s1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s13
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_alignbit_b32 v1, s9, v0, v1
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; SI-NEXT:    s_cmp_eq_u32 s0, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s12
 ; SI-NEXT:    v_mov_b32_e32 v4, s0
 ; SI-NEXT:    v_alignbit_b32 v4, s8, v0, v4
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; SI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -337,26 +347,30 @@ define amdgpu_kernel void @fshr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
 ; VI-NEXT:    s_and_b32 s3, s3, 31
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    v_alignbit_b32 v1, s7, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; VI-NEXT:    s_cmp_eq_u32 s3, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    s_and_b32 s2, s2, 31
+; VI-NEXT:    v_alignbit_b32 v1, s7, v0, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v1, v0, vcc
+; VI-NEXT:    s_cmp_eq_u32 s2, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
 ; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    v_alignbit_b32 v1, s6, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    s_and_b32 s1, s1, 31
+; VI-NEXT:    v_alignbit_b32 v1, s6, v0, v1
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
+; VI-NEXT:    s_cmp_eq_u32 s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; VI-NEXT:    s_cmp_eq_u32 s0, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
 ; VI-NEXT:    v_mov_b32_e32 v4, s0
 ; VI-NEXT:    v_alignbit_b32 v4, s4, v0, v4
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s12
 ; VI-NEXT:    v_mov_b32_e32 v5, s13
@@ -373,26 +387,30 @@ define amdgpu_kernel void @fshr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s11
 ; GFX9-NEXT:    s_and_b32 s3, s3, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    v_alignbit_b32 v1, s7, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    s_and_b32 s2, s2, 31
+; GFX9-NEXT:    v_alignbit_b32 v1, s7, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v0, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    v_alignbit_b32 v1, s6, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    s_and_b32 s1, s1, 31
+; GFX9-NEXT:    v_alignbit_b32 v1, s6, v0, v1
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s9
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, v1
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX9-NEXT:    v_alignbit_b32 v4, s4, v0, v4
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
+; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s13

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
index 851d232e00a6..1ae10d7fe48c 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
@@ -38,15 +38,19 @@ entry:
 ; GCN-LABEL: {{^}}int4_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 3
-; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST:[0-9]+]], 1, v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 2
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX]], 1
-; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CC3]]
-; GCN-DAG: v_cmp_ne_u32_e64 [[CC4:[^,]+]], [[IDX]], 0
-; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST:[0-9]+]], 1, v{{[0-9]+}}, [[CC4]]
-; GCN:     flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST]]:[[ELT_LAST]]]
+; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 3
+; GCN-DAG: s_cselect_b32 s[[ELT_3:[0-9]+]], s{{[0-9]+}}, 1
+; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
+; GCN-DAG: s_cselect_b32 s[[ELT_2:[0-9]+]], s{{[0-9]+}}, 1
+; GCN-DAG: s_cmp_lg_u32 [[IDX]], 1
+; GCN-DAG: s_cselect_b32 s[[ELT_1:[0-9]+]], s{{[0-9]+}}, 1
+; GCN-DAG: s_cmp_lg_u32 [[IDX]], 0
+; GCN-DAG: s_cselect_b32 s[[ELT_0:[0-9]+]], s{{[0-9]+}}, 1
+; GCN-DAG: v_mov_b32_e32 v[[VELT_0:[0-9]+]], s[[ELT_0]]
+; GCN-DAG: v_mov_b32_e32 v[[VELT_1:[0-9]+]], s[[ELT_1]]
+; GCN-DAG: v_mov_b32_e32 v[[VELT_2:[0-9]+]], s[[ELT_2]]
+; GCN-DAG: v_mov_b32_e32 v[[VELT_3:[0-9]+]], s[[ELT_3]]
+; GCN:     flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[VELT_0]]:[[VELT_3]]]
 define amdgpu_kernel void @int4_inselt(<4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %sel) {
 entry:
   %v = insertelement <4 x i32> %vec, i32 1, i32 %sel

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 60a2582741f1..c53f2b07aa7c 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -579,12 +579,12 @@ define amdgpu_kernel void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s3, 0x100f000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s6
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    s_cselect_b32 s5, s7, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    s_cselect_b32 s4, s6, 5
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -596,12 +596,12 @@ define amdgpu_kernel void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s7
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
-; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s6
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    s_cselect_b32 s5, s7, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    s_cselect_b32 s4, s6, 5
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <2 x i32> %a, i32 5, i32 %b
@@ -618,15 +618,15 @@ define amdgpu_kernel void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s3, 0x100f000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
-; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; SI-NEXT:    s_cmp_lg_u32 s4, 2
+; SI-NEXT:    s_cselect_b32 s5, s10, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    s_cselect_b32 s6, s9, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    s_cselect_b32 s4, s8, 5
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s6
+; SI-NEXT:    v_mov_b32_e32 v2, s5
 ; SI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -638,15 +638,15 @@ define amdgpu_kernel void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
-; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
-; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
+; VI-NEXT:    s_cmp_lg_u32 s4, 2
+; VI-NEXT:    s_cselect_b32 s5, s10, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    s_cselect_b32 s6, s9, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    s_cselect_b32 s4, s8, 5
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s6
+; VI-NEXT:    v_mov_b32_e32 v2, s5
 ; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <3 x i32> %a, i32 5, i32 %b
@@ -664,19 +664,18 @@ define amdgpu_kernel void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s3, 0x100f000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 3
-; SI-NEXT:    v_mov_b32_e32 v4, s4
-; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 2
-; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 1
-; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
+; SI-NEXT:    s_cmp_eq_u32 s6, 3
+; SI-NEXT:    s_cselect_b32 s5, s4, s11
+; SI-NEXT:    s_cmp_eq_u32 s6, 2
+; SI-NEXT:    s_cselect_b32 s7, s4, s10
+; SI-NEXT:    s_cmp_eq_u32 s6, 1
+; SI-NEXT:    s_cselect_b32 s9, s4, s9
+; SI-NEXT:    s_cmp_eq_u32 s6, 0
+; SI-NEXT:    s_cselect_b32 s4, s4, s8
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s7
+; SI-NEXT:    v_mov_b32_e32 v3, s5
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -689,19 +688,18 @@ define amdgpu_kernel void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 3
-; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 2
-; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v4, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 1
-; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
+; VI-NEXT:    s_cmp_eq_u32 s6, 3
+; VI-NEXT:    s_cselect_b32 s5, s4, s11
+; VI-NEXT:    s_cmp_eq_u32 s6, 2
+; VI-NEXT:    s_cselect_b32 s7, s4, s10
+; VI-NEXT:    s_cmp_eq_u32 s6, 1
+; VI-NEXT:    s_cselect_b32 s9, s4, s9
+; VI-NEXT:    s_cmp_eq_u32 s6, 0
+; VI-NEXT:    s_cselect_b32 s4, s4, s8
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:    v_mov_b32_e32 v2, s7
+; VI-NEXT:    v_mov_b32_e32 v3, s5
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <4 x i32> %a, i32 %val, i32 %b
@@ -718,32 +716,32 @@ define amdgpu_kernel void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s3, 0x100f000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
-; SI-NEXT:    v_cndmask_b32_e32 v3, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
-; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
-; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s15
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
-; SI-NEXT:    v_cndmask_b32_e32 v7, 5, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s14
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
-; SI-NEXT:    v_cndmask_b32_e32 v6, 5, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s13
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
-; SI-NEXT:    v_cndmask_b32_e32 v5, 5, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s12
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
-; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
-; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
-; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; SI-NEXT:    s_cmp_lg_u32 s4, 3
+; SI-NEXT:    s_cselect_b32 s5, s11, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 2
+; SI-NEXT:    s_cselect_b32 s6, s10, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    s_cselect_b32 s7, s9, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    s_cselect_b32 s8, s8, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 7
+; SI-NEXT:    s_cselect_b32 s9, s15, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 6
+; SI-NEXT:    s_cselect_b32 s10, s14, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 5
+; SI-NEXT:    s_cselect_b32 s11, s13, 5
+; SI-NEXT:    s_cmp_lg_u32 s4, 4
+; SI-NEXT:    s_cselect_b32 s4, s12, 5
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_mov_b32_e32 v3, s9
+; SI-NEXT:    v_mov_b32_e32 v4, s8
+; SI-NEXT:    v_mov_b32_e32 v5, s7
+; SI-NEXT:    v_mov_b32_e32 v6, s6
+; SI-NEXT:    v_mov_b32_e32 v7, s5
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: dynamic_insertelement_v8i32:
@@ -754,32 +752,32 @@ define amdgpu_kernel void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s3, 0x1100f000
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
-; VI-NEXT:    v_cndmask_b32_e32 v3, 5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
-; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
-; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v4, s15
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
-; VI-NEXT:    v_cndmask_b32_e32 v7, 5, v4, vcc
-; VI-NEXT:    v_mov_b32_e32 v4, s14
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
-; VI-NEXT:    v_cndmask_b32_e32 v6, 5, v4, vcc
-; VI-NEXT:    v_mov_b32_e32 v4, s13
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
-; VI-NEXT:    v_cndmask_b32_e32 v5, 5, v4, vcc
-; VI-NEXT:    v_mov_b32_e32 v4, s12
-; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
-; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
-; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
-; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; VI-NEXT:    s_cmp_lg_u32 s4, 3
+; VI-NEXT:    s_cselect_b32 s5, s11, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 2
+; VI-NEXT:    s_cselect_b32 s6, s10, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    s_cselect_b32 s7, s9, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    s_cselect_b32 s8, s8, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 7
+; VI-NEXT:    s_cselect_b32 s9, s15, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 6
+; VI-NEXT:    s_cselect_b32 s10, s14, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 5
+; VI-NEXT:    s_cselect_b32 s11, s13, 5
+; VI-NEXT:    s_cmp_lg_u32 s4, 4
+; VI-NEXT:    s_cselect_b32 s4, s12, 5
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s11
+; VI-NEXT:    v_mov_b32_e32 v2, s10
+; VI-NEXT:    v_mov_b32_e32 v3, s9
+; VI-NEXT:    v_mov_b32_e32 v4, s8
+; VI-NEXT:    v_mov_b32_e32 v5, s7
+; VI-NEXT:    v_mov_b32_e32 v6, s6
+; VI-NEXT:    v_mov_b32_e32 v7, s5
+; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
   %vecins = insertelement <8 x i32> %a, i32 5, i32 %b
   store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32
@@ -1123,107 +1121,95 @@ define amdgpu_kernel void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x0
 ; SI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x4
 ; SI-NEXT:    s_load_dword s4, s[4:5], 0x8
+; SI-NEXT:    s_movk_i32 s7, 0xff
 ; SI-NEXT:    s_mov_b32 s3, 0x100f000
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_lshr_b32 s5, s11, 24
-; SI-NEXT:    v_mov_b32_e32 v0, s5
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 15
-; SI-NEXT:    s_lshr_b32 s5, s11, 16
-; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 14
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    s_movk_i32 s5, 0xff
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
-; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    s_cmp_lg_u32 s4, 15
+; SI-NEXT:    s_cselect_b32 s5, s5, 5
+; SI-NEXT:    s_lshl_b32 s5, s5, 8
+; SI-NEXT:    s_lshr_b32 s6, s11, 16
+; SI-NEXT:    s_cmp_lg_u32 s4, 14
+; SI-NEXT:    s_cselect_b32 s6, s6, 5
+; SI-NEXT:    s_and_b32 s6, s6, s7
+; SI-NEXT:    s_or_b32 s5, s6, s5
+; SI-NEXT:    s_lshl_b32 s5, s5, 16
 ; SI-NEXT:    s_lshr_b32 s6, s11, 8
-; SI-NEXT:    v_or_b32_e32 v0, v1, v0
-; SI-NEXT:    v_mov_b32_e32 v1, s6
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 13
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v2, s11
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 12
-; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; SI-NEXT:    v_and_b32_e32 v2, s5, v2
-; SI-NEXT:    v_or_b32_e32 v1, v2, v1
-; SI-NEXT:    s_mov_b32 s6, 0xffff
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT:    v_and_b32_e32 v1, s6, v1
-; SI-NEXT:    s_lshr_b32 s7, s10, 24
-; SI-NEXT:    v_or_b32_e32 v3, v1, v0
-; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 11
-; SI-NEXT:    s_lshr_b32 s7, s10, 16
-; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 10
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
-; SI-NEXT:    v_and_b32_e32 v1, s5, v1
-; SI-NEXT:    s_lshr_b32 s7, s10, 8
-; SI-NEXT:    v_or_b32_e32 v0, v1, v0
-; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 9
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v2, s10
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 8
-; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; SI-NEXT:    v_and_b32_e32 v2, s5, v2
-; SI-NEXT:    v_or_b32_e32 v1, v2, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT:    v_and_b32_e32 v1, s6, v1
-; SI-NEXT:    s_lshr_b32 s7, s9, 24
-; SI-NEXT:    v_or_b32_e32 v2, v1, v0
-; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
-; SI-NEXT:    s_lshr_b32 s7, s9, 16
-; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
-; SI-NEXT:    v_and_b32_e32 v1, s5, v1
-; SI-NEXT:    s_lshr_b32 s7, s9, 8
-; SI-NEXT:    v_or_b32_e32 v0, v1, v0
-; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
-; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s9
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
-; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
-; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
-; SI-NEXT:    v_and_b32_e32 v4, s5, v4
-; SI-NEXT:    v_or_b32_e32 v1, v4, v1
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT:    v_and_b32_e32 v1, s6, v1
-; SI-NEXT:    s_lshr_b32 s7, s8, 24
-; SI-NEXT:    v_or_b32_e32 v1, v1, v0
-; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
-; SI-NEXT:    s_lshr_b32 s7, s8, 16
-; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
-; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
-; SI-NEXT:    v_and_b32_e32 v4, s5, v4
-; SI-NEXT:    s_lshr_b32 s7, s8, 8
-; SI-NEXT:    v_or_b32_e32 v0, v4, v0
-; SI-NEXT:    v_mov_b32_e32 v4, s7
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
-; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s8
-; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
-; SI-NEXT:    v_cndmask_b32_e32 v5, 5, v5, vcc
-; SI-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
-; SI-NEXT:    v_and_b32_e32 v5, s5, v5
-; SI-NEXT:    v_or_b32_e32 v4, v5, v4
-; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT:    v_and_b32_e32 v4, s6, v4
-; SI-NEXT:    v_or_b32_e32 v0, v4, v0
+; SI-NEXT:    s_cmp_lg_u32 s4, 13
+; SI-NEXT:    s_cselect_b32 s6, s6, 5
+; SI-NEXT:    s_lshl_b32 s6, s6, 8
+; SI-NEXT:    s_cmp_lg_u32 s4, 12
+; SI-NEXT:    s_cselect_b32 s11, s11, 5
+; SI-NEXT:    s_and_b32 s11, s11, s7
+; SI-NEXT:    s_or_b32 s6, s11, s6
+; SI-NEXT:    s_mov_b32 s11, 0xffff
+; SI-NEXT:    s_and_b32 s6, s6, s11
+; SI-NEXT:    s_or_b32 s5, s6, s5
+; SI-NEXT:    s_lshr_b32 s6, s10, 24
+; SI-NEXT:    s_cmp_lg_u32 s4, 11
+; SI-NEXT:    s_cselect_b32 s6, s6, 5
+; SI-NEXT:    s_lshl_b32 s6, s6, 8
+; SI-NEXT:    s_lshr_b32 s12, s10, 16
+; SI-NEXT:    s_cmp_lg_u32 s4, 10
+; SI-NEXT:    s_cselect_b32 s12, s12, 5
+; SI-NEXT:    s_and_b32 s12, s12, s7
+; SI-NEXT:    s_or_b32 s6, s12, s6
+; SI-NEXT:    s_lshl_b32 s6, s6, 16
+; SI-NEXT:    s_lshr_b32 s12, s10, 8
+; SI-NEXT:    s_cmp_lg_u32 s4, 9
+; SI-NEXT:    s_cselect_b32 s12, s12, 5
+; SI-NEXT:    s_lshl_b32 s12, s12, 8
+; SI-NEXT:    s_cmp_lg_u32 s4, 8
+; SI-NEXT:    s_cselect_b32 s10, s10, 5
+; SI-NEXT:    s_and_b32 s10, s10, s7
+; SI-NEXT:    s_or_b32 s10, s10, s12
+; SI-NEXT:    s_and_b32 s10, s10, s11
+; SI-NEXT:    s_or_b32 s6, s10, s6
+; SI-NEXT:    s_lshr_b32 s10, s9, 24
+; SI-NEXT:    s_cmp_lg_u32 s4, 7
+; SI-NEXT:    s_cselect_b32 s10, s10, 5
+; SI-NEXT:    s_lshl_b32 s10, s10, 8
+; SI-NEXT:    s_lshr_b32 s12, s9, 16
+; SI-NEXT:    s_cmp_lg_u32 s4, 6
+; SI-NEXT:    s_cselect_b32 s12, s12, 5
+; SI-NEXT:    s_and_b32 s12, s12, s7
+; SI-NEXT:    s_or_b32 s10, s12, s10
+; SI-NEXT:    s_lshl_b32 s10, s10, 16
+; SI-NEXT:    s_lshr_b32 s12, s9, 8
+; SI-NEXT:    s_cmp_lg_u32 s4, 5
+; SI-NEXT:    s_cselect_b32 s12, s12, 5
+; SI-NEXT:    s_lshl_b32 s12, s12, 8
+; SI-NEXT:    s_cmp_lg_u32 s4, 4
+; SI-NEXT:    s_cselect_b32 s9, s9, 5
+; SI-NEXT:    s_and_b32 s9, s9, s7
+; SI-NEXT:    s_or_b32 s9, s9, s12
+; SI-NEXT:    s_and_b32 s9, s9, s11
+; SI-NEXT:    s_or_b32 s9, s9, s10
+; SI-NEXT:    s_lshr_b32 s10, s8, 24
+; SI-NEXT:    s_cmp_lg_u32 s4, 3
+; SI-NEXT:    s_cselect_b32 s10, s10, 5
+; SI-NEXT:    s_lshl_b32 s10, s10, 8
+; SI-NEXT:    s_lshr_b32 s12, s8, 16
+; SI-NEXT:    s_cmp_lg_u32 s4, 2
+; SI-NEXT:    s_cselect_b32 s12, s12, 5
+; SI-NEXT:    s_and_b32 s12, s12, s7
+; SI-NEXT:    s_or_b32 s10, s12, s10
+; SI-NEXT:    s_lshl_b32 s10, s10, 16
+; SI-NEXT:    s_lshr_b32 s12, s8, 8
+; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    s_cselect_b32 s12, s12, 5
+; SI-NEXT:    s_lshl_b32 s12, s12, 8
+; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    s_cselect_b32 s4, s8, 5
+; SI-NEXT:    s_and_b32 s4, s4, s7
+; SI-NEXT:    s_or_b32 s4, s4, s12
+; SI-NEXT:    s_and_b32 s4, s4, s11
+; SI-NEXT:    s_or_b32 s4, s4, s10
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    v_mov_b32_e32 v3, s5
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;

diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index 971f99dd7717..f60db1a4627b 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -75,7 +75,7 @@ entry:
 ; Check that the select instruction is not deleted.
 ; FUNC-LABEL: {{^}}i24_i32_i32_mad:
 ; EG: CNDE_INT
-; SI: v_cndmask
+; SI: s_cselect
 define amdgpu_kernel void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
 entry:
   %0 = ashr i32 %a, 8

diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll
index 3a4a2d07772c..514223d47d88 100644
--- a/llvm/test/CodeGen/AMDGPU/sad.ll
+++ b/llvm/test/CodeGen/AMDGPU/sad.ll
@@ -134,7 +134,7 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(i32 addrspace(1)* %out,
 
 ; GCN-LABEL: {{^}}v_sad_u32_multi_use_select_pat2:
 ; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN-DAG: s_cmp_gt_u32  s{{[0-9]+}}, s{{[0-9]+}}
 ; GCN-DAG: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
   %icmp0 = icmp ugt i32 %a, %b
@@ -254,12 +254,12 @@ define amdgpu_kernel void @v_sad_u32_i8_pat2(i8 addrspace(1)* %out) {
 
 ; GCN-LABEL: {{^}}s_sad_u32_i8_pat2:
 ; GCN: s_load_dword
-; GCN: s_bfe_u32
+; GCN-DAG: s_bfe_u32
 ; GCN-DAG: s_sub_i32
 ; GCN-DAG: s_and_b32
 ; GCN-DAG: s_sub_i32
 ; GCN-DAG: s_lshr_b32
-; GCN: v_add_i32_e32
+; GCN: s_add_i32
 define amdgpu_kernel void @s_sad_u32_i8_pat2(i8 addrspace(1)* %out, i8 zeroext %a, i8 zeroext %b, i8 zeroext %c) {
   %icmp0 = icmp ugt i8 %a, %b
   %sub0 = sub i8 %a, %b
@@ -273,10 +273,10 @@ define amdgpu_kernel void @s_sad_u32_i8_pat2(i8 addrspace(1)* %out, i8 zeroext %
 }
 
 ; GCN-LABEL: {{^}}v_sad_u32_mismatched_operands_pat1:
-; GCN: v_cmp_le_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
 ; GCN: s_max_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_cmp_le_u32 s{{[0-9]+}}, s{{[0-9]+}}
+; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
   %icmp0 = icmp ugt i32 %a, %b
   %t0 = select i1 %icmp0, i32 %a, i32 %b
@@ -294,7 +294,7 @@ define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(i32 addrspace(1)*
 ; GCN-LABEL: {{^}}v_sad_u32_mismatched_operands_pat2:
 ; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 ; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
   %icmp0 = icmp ugt i32 %a, %b
   %sub0 = sub i32 %a, %d

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv.ll b/llvm/test/CodeGen/AMDGPU/sdiv.ll
index 4b322a875005..dd87d23481ce 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv.ll
@@ -1605,7 +1605,7 @@ define amdgpu_kernel void @v_sdiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %i
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 8
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
@@ -1638,7 +1638,7 @@ define amdgpu_kernel void @v_sdiv_i8(i32 addrspace(1)* %out, i8 addrspace(1)* %i
 ; TONGA-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; TONGA-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
 ; TONGA-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v1, v0
 ; TONGA-NEXT:    v_bfe_i32 v0, v0, 0, 8
 ; TONGA-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; TONGA-NEXT:    s_endpgm
@@ -1758,7 +1758,7 @@ define amdgpu_kernel void @v_sdiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)*
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v3|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
@@ -1799,7 +1799,7 @@ define amdgpu_kernel void @v_sdiv_i23(i32 addrspace(1)* %out, i23 addrspace(1)*
 ; TONGA-NEXT:    v_cvt_i32_f32_e32 v2, v2
 ; TONGA-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v3|
 ; TONGA-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
 ; TONGA-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; TONGA-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; TONGA-NEXT:    s_endpgm
@@ -1938,7 +1938,7 @@ define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)*
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v3
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v2|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v3, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
@@ -1976,7 +1976,7 @@ define amdgpu_kernel void @v_sdiv_i24(i32 addrspace(1)* %out, i24 addrspace(1)*
 ; TONGA-NEXT:    v_cvt_i32_f32_e32 v3, v3
 ; TONGA-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v2|
 ; TONGA-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
-; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v3
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v3, v0
 ; TONGA-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; TONGA-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; TONGA-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 9ad18f412708..2dc94374df0e 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -6,7 +6,6 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_sdiv:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
@@ -15,123 +14,124 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    s_add_u32 s2, s2, s12
 ; GCN-NEXT:    s_mov_b32 s13, s12
 ; GCN-NEXT:    s_addc_u32 s3, s3, s12
-; GCN-NEXT:    s_xor_b64 s[2:3], s[2:3], s[12:13]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GCN-NEXT:    s_sub_u32 s4, 0, s2
-; GCN-NEXT:    s_subb_u32 s5, 0, s3
-; GCN-NEXT:    s_ashr_i32 s14, s11, 31
+; GCN-NEXT:    s_xor_b64 s[14:15], s[2:3], s[12:13]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s14
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s15
+; GCN-NEXT:    s_sub_u32 s2, 0, s14
+; GCN-NEXT:    s_subb_u32 s3, 0, s15
+; GCN-NEXT:    s_ashr_i32 s16, s11, 31
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    s_mov_b32 s15, s14
+; GCN-NEXT:    s_mov_b32 s17, s16
+; GCN-NEXT:    s_mov_b32 s4, s8
+; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x2f800000, v0
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v2
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_hi_u32 v4, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, s4, v2
-; GCN-NEXT:    v_mul_lo_u32 v6, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v4, v0, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v9, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v7, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v8, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v5, v2, v5
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v8, v4
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v6, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v3
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v2, v4, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v5, s4, v3
-; GCN-NEXT:    v_mul_hi_u32 v6, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s9
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, s4, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
+; GCN-NEXT:    v_mul_lo_u32 v5, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s2, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
+; GCN-NEXT:    v_mul_lo_u32 v6, v1, v4
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
+; GCN-NEXT:    v_mul_lo_u32 v5, s2, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v8, s3, v0
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; GCN-NEXT:    v_mul_lo_u32 v7, s2, v0
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
 ; GCN-NEXT:    v_mul_lo_u32 v10, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v12, v0, v5
-; GCN-NEXT:    v_mul_hi_u32 v11, v0, v6
-; GCN-NEXT:    v_mul_hi_u32 v9, v3, v6
-; GCN-NEXT:    v_mul_lo_u32 v6, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v8, v3, v5
+; GCN-NEXT:    v_mul_hi_u32 v11, v0, v7
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v8, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, v11, v10
-; GCN-NEXT:    v_addc_u32_e32 v11, vcc, v7, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v3, v3, v5
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v10, v6
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v2, v5, s[0:1]
-; GCN-NEXT:    s_add_u32 s0, s10, s14
-; GCN-NEXT:    s_addc_u32 s1, s11, s14
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[14:15]
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v3, s10, v2
-; GCN-NEXT:    v_mul_hi_u32 v4, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v5, s10, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, s11, v2
-; GCN-NEXT:    v_mul_lo_u32 v2, s11, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
+; GCN-NEXT:    v_addc_u32_e32 v11, vcc, 0, v12, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, v10, v7
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
+; GCN-NEXT:    s_add_u32 s0, s10, s16
+; GCN-NEXT:    s_addc_u32 s1, s11, s16
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[16:17]
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    s_mov_b32 s4, s8
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s3, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, s14, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s14, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s15, v0
+; GCN-NEXT:    v_mov_b32_e32 v5, s15
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v3, s14, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s11, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s10, v3
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s2, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
+; GCN-NEXT:    v_sub_i32_e64 v3, s[0:1], s10, v3
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s14, v3
+; GCN-NEXT:    v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s15, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s14, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s15, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v6, s11
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v6, v2, s[0:1]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s15, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v3
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s14, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s15, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    s_xor_b64 s[0:1], s[14:15], s[12:13]
+; GCN-NEXT:    s_xor_b64 s[0:1], s[16:17], s[12:13]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s1, v1
@@ -144,104 +144,98 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-LABEL: s_test_sdiv:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
+; GCN-IR-NEXT:    s_ashr_i32 s0, s7, 31
+; GCN-IR-NEXT:    s_mov_b32 s1, s0
+; GCN-IR-NEXT:    s_ashr_i32 s2, s9, 31
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[0:1], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s10, s6, s0
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_ashr_i32 s8, s1, 31
-; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[2:3], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s10, s6, s2
-; GCN-IR-NEXT:    s_mov_b32 s9, s8
-; GCN-IR-NEXT:    s_subb_u32 s11, s7, s2
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_sub_u32 s6, s0, s8
-; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s6
-; GCN-IR-NEXT:    s_subb_u32 s7, s1, s8
-; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s7
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    s_subb_u32 s11, s7, s0
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[2:3], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s6, s6, s2
+; GCN-IR-NEXT:    s_subb_u32 s7, s7, s2
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[12:13], s[14:15]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s7
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s12, s12, s14
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[14:15], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[12:13], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[10:11], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cmp_eq_u32 s11, 0
+; GCN-IR-NEXT:    s_cselect_b32 s16, s14, s15
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[14:15], 63
+; GCN-IR-NEXT:    s_mov_b32 s13, 0
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[20:21], s[14:15], 63
+; GCN-IR-NEXT:    s_xor_b64 s[22:23], s[18:19], -1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[22:23], s[20:21]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s18, s14, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-IR-NEXT:    s_addc_u32 s19, s15, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s15
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[10:11], s14
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[10:11], v4
+; GCN-IR-NEXT:    s_lshr_b64 s[18:19], s[10:11], s18
 ; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_not_b64 s[8:9], s[12:13]
+; GCN-IR-NEXT:    s_mov_b32 s17, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s16
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, s13
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s7, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s8, s15, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[18:19], s[18:19], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[16:17], s[14:15]
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s18
+; GCN-IR-NEXT:    s_subb_u32 s8, s11, s19
+; GCN-IR-NEXT:    s_ashr_i32 s16, s8, 31
+; GCN-IR-NEXT:    s_mov_b32 s17, s16
+; GCN-IR-NEXT:    s_and_b32 s8, s16, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[16:17], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s18, s18, s20
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    s_subb_u32 s19, s19, s21
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:  BB0_4: ; %Flow6
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:  BB0_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[18:19]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_6: ; %Flow6
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB0_7: ; %Flow7
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[8:9], s[2:3]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[18:19]
+; GCN-IR-NEXT:  BB0_6: ; %Flow7
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
@@ -512,15 +506,15 @@ define amdgpu_kernel void @s_test_sdiv24_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -542,15 +536,15 @@ define amdgpu_kernel void @s_test_sdiv24_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -619,16 +613,16 @@ define amdgpu_kernel void @s_test_sdiv32_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_xor_b32 s4, s7, s6
 ; GCN-NEXT:    s_ashr_i32 s4, s4, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_or_b32 s4, s4, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
 ; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
@@ -647,16 +641,16 @@ define amdgpu_kernel void @s_test_sdiv32_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_xor_b32 s4, s7, s6
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
 ; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
@@ -684,15 +678,15 @@ define amdgpu_kernel void @s_test_sdiv31_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 31
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -714,15 +708,15 @@ define amdgpu_kernel void @s_test_sdiv31_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 31
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -751,15 +745,15 @@ define amdgpu_kernel void @s_test_sdiv23_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -781,15 +775,15 @@ define amdgpu_kernel void @s_test_sdiv23_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -818,15 +812,15 @@ define amdgpu_kernel void @s_test_sdiv25_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 25
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -848,15 +842,15 @@ define amdgpu_kernel void @s_test_sdiv25_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_xor_b32 s4, s4, s8
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 25
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -882,35 +876,35 @@ define amdgpu_kernel void @s_test_sdiv24_v2i64(<2 x i64> addrspace(1)* %out, <2
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s8
 ; GCN-NEXT:    s_xor_b32 s0, s8, s0
-; GCN-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s0
 ; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 40
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_ashr_i64 s[10:11], s[10:11], 40
+; GCN-NEXT:    s_or_b32 s3, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    s_ashr_i64 s[10:11], s[10:11], 40
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s3, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v2, s2
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v3, s10
 ; GCN-NEXT:    s_xor_b32 s0, s10, s2
 ; GCN-NEXT:    s_ashr_i32 s0, s0, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v2
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mov_b32_e32 v5, s0
+; GCN-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    v_mul_f32_e32 v4, v3, v4
 ; GCN-NEXT:    v_trunc_f32_e32 v4, v4
 ; GCN-NEXT:    v_mad_f32 v3, -v4, v2, v3
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
-; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
-; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v3|, |v2|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s2, 0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s0, v4
 ; GCN-NEXT:    v_bfe_i32 v2, v2, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
@@ -929,35 +923,35 @@ define amdgpu_kernel void @s_test_sdiv24_v2i64(<2 x i64> addrspace(1)* %out, <2
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s0
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s8
 ; GCN-IR-NEXT:    s_xor_b32 s0, s8, s0
-; GCN-IR-NEXT:    s_ashr_i32 s0, s0, 30
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_or_b32 s0, s0, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s0
 ; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 40
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-IR-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-IR-NEXT:    s_ashr_i64 s[10:11], s[10:11], 40
+; GCN-IR-NEXT:    s_or_b32 s3, s0, 1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    s_ashr_i64 s[10:11], s[10:11], 40
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-IR-NEXT:    s_cselect_b32 s0, s3, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v2, s2
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v3, s10
 ; GCN-IR-NEXT:    s_xor_b32 s0, s10, s2
 ; GCN-IR-NEXT:    s_ashr_i32 s0, s0, 30
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v4, v2
-; GCN-IR-NEXT:    s_or_b32 s0, s0, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, s0
+; GCN-IR-NEXT:    s_or_b32 s2, s0, 1
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    v_mul_f32_e32 v4, v3, v4
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v4, v4
 ; GCN-IR-NEXT:    v_mad_f32 v3, -v4, v2, v3
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v3|, |v2|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-IR-NEXT:    s_cselect_b32 s0, s2, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s0, v4
 ; GCN-IR-NEXT:    v_bfe_i32 v2, v2, 0, 24
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GCN-IR-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
@@ -998,7 +992,7 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v1|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
@@ -1014,105 +1008,99 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_load_dword s0, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[2:3], 24
 ; GCN-IR-NEXT:    s_sext_i32_i16 s7, s0
-; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
-; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
+; GCN-IR-NEXT:    s_ashr_i32 s0, s3, 31
+; GCN-IR-NEXT:    s_mov_b32 s1, s0
+; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
+; GCN-IR-NEXT:    s_ashr_i64 s[12:13], s[6:7], 24
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s10, s6, s0
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 24
-; GCN-IR-NEXT:    s_ashr_i32 s6, s7, 31
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
-; GCN-IR-NEXT:    s_sub_u32 s10, s0, s2
-; GCN-IR-NEXT:    s_mov_b32 s7, s6
-; GCN-IR-NEXT:    s_subb_u32 s11, s1, s2
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s8, s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s8
-; GCN-IR-NEXT:    s_subb_u32 s9, s1, s6
-; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    s_subb_u32 s11, s7, s0
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[2:3], s[12:13]
+; GCN-IR-NEXT:    s_sub_u32 s6, s6, s2
+; GCN-IR-NEXT:    s_subb_u32 s7, s7, s2
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[12:13], s[14:15]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s7
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s12, s12, s14
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
 ; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[14:15], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[12:13], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[10:11], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cmp_eq_u32 s11, 0
+; GCN-IR-NEXT:    s_cselect_b32 s16, s14, s15
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[14:15], 63
+; GCN-IR-NEXT:    s_mov_b32 s13, 0
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[20:21], s[14:15], 63
+; GCN-IR-NEXT:    s_xor_b64 s[22:23], s[18:19], -1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[22:23], s[20:21]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s18, s14, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-IR-NEXT:    s_addc_u32 s19, s15, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s15
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[10:11], s14
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[10:11], v4
-; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[18:19], s[10:11], s18
+; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
+; GCN-IR-NEXT:    s_not_b64 s[8:9], s[12:13]
+; GCN-IR-NEXT:    s_mov_b32 s17, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s16
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, s13
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  BB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s8, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s9, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s8, s15, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[18:19], s[18:19], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[16:17], s[14:15]
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s18
+; GCN-IR-NEXT:    s_subb_u32 s8, s11, s19
+; GCN-IR-NEXT:    s_ashr_i32 s16, s8, 31
+; GCN-IR-NEXT:    s_mov_b32 s17, s16
+; GCN-IR-NEXT:    s_and_b32 s8, s16, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[16:17], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s18, s18, s20
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    s_subb_u32 s19, s19, s21
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_3
+; GCN-IR-NEXT:  BB9_4: ; %Flow3
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-IR-NEXT:    s_branch BB9_6
-; GCN-IR-NEXT:  BB9_4:
+; GCN-IR-NEXT:  BB9_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[18:19]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB9_7
-; GCN-IR-NEXT:  BB9_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB9_6: ; %Flow3
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB9_7: ; %Flow4
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[2:3]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[18:19]
+; GCN-IR-NEXT:  BB9_6: ; %Flow4
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
@@ -1136,15 +1124,15 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s2, s7, 31
-; GCN-NEXT:    s_add_u32 s0, s6, s2
-; GCN-NEXT:    s_addc_u32 s1, s7, s2
-; GCN-NEXT:    s_mov_b32 s3, s2
-; GCN-NEXT:    s_xor_b64 s[8:9], s[0:1], s[2:3]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GCN-NEXT:    s_sub_u32 s3, 0, s8
-; GCN-NEXT:    s_subb_u32 s10, 0, s9
+; GCN-NEXT:    s_ashr_i32 s8, s7, 31
+; GCN-NEXT:    s_add_u32 s0, s6, s8
+; GCN-NEXT:    s_addc_u32 s1, s7, s8
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[8:9]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s11
+; GCN-NEXT:    s_sub_u32 s2, 0, s10
+; GCN-NEXT:    s_subb_u32 s3, 0, s11
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
@@ -1156,10 +1144,10 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v3
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_mul_hi_u32 v5, s3, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s3, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, s10, v0
-; GCN-NEXT:    v_mul_lo_u32 v6, s3, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s2, v3
+; GCN-NEXT:    v_mul_lo_u32 v7, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v6, s2, v0
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
 ; GCN-NEXT:    v_mul_hi_u32 v5, v0, v6
@@ -1178,11 +1166,11 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v4
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e64 v4, vcc, v3, v5, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v6, s3, v4
-; GCN-NEXT:    v_mul_hi_u32 v7, s3, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, s10, v0
+; GCN-NEXT:    v_mul_lo_u32 v6, s2, v4
+; GCN-NEXT:    v_mul_hi_u32 v7, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v8, s3, v0
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_mul_lo_u32 v7, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v7, s2, v0
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v8, v6
 ; GCN-NEXT:    v_mul_lo_u32 v10, v0, v6
 ; GCN-NEXT:    v_mul_hi_u32 v12, v0, v6
@@ -1212,139 +1200,133 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, 0, v4
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v2, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v3, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s8, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s8, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s9, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s9
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s11, v0
+; GCN-NEXT:    v_mov_b32_e32 v5, s11
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, s8, v0
+; GCN-NEXT:    v_mul_lo_u32 v3, s10, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 24, v3
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s8, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s9, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, 0, v2, vcc
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
+; GCN-NEXT:    v_sub_i32_e64 v3, s[0:1], 24, v3
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s10, v3
+; GCN-NEXT:    v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s11, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v3
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s10, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s11, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, 0, v2, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v4
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s11, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[2:3]
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s10, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s11, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s2, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s2, v1
-; GCN-NEXT:    v_mov_b32_e32 v2, s2
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
+; GCN-NEXT:    v_xor_b32_e32 v1, s8, v1
+; GCN-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_sdiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
-; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s6, s0, s2
-; GCN-IR-NEXT:    s_subb_u32 s7, s1, s2
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[8:9], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_ashr_i32 s4, s3, 31
+; GCN-IR-NEXT:    s_mov_b32 s5, s4
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[4:5], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s2, s2, s4
+; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
+; GCN-IR-NEXT:    s_subb_u32 s3, s3, s4
+; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s10, s6, s7
+; GCN-IR-NEXT:    s_add_u32 s8, s10, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    s_xor_b64 s[16:17], s[12:13], -1
+; GCN-IR-NEXT:    s_and_b64 s[14:15], s[16:17], s[14:15]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s14, s8, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_addc_u32 s15, s9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], 24, s8
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_add_u32 s8, s6, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s9, s7, -1
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], 24, s14
+; GCN-IR-NEXT:    s_add_u32 s8, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s9, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s10, 58, s10
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  BB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s9
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s8, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s7, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s6, s13, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[16:17], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_sub_u32 s6, s8, s16
+; GCN-IR-NEXT:    s_subb_u32 s6, s9, s17
+; GCN-IR-NEXT:    s_ashr_i32 s14, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s15, s14
+; GCN-IR-NEXT:    s_and_b32 s6, s14, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[14:15], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s16, s16, s18
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    s_subb_u32 s17, s17, s19
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB10_3
+; GCN-IR-NEXT:  BB10_4: ; %Flow5
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[12:13], 1
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[6:7], s[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-IR-NEXT:    s_branch BB10_6
-; GCN-IR-NEXT:  BB10_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB10_7
 ; GCN-IR-NEXT:  BB10_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB10_6: ; %Flow5
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB10_7: ; %udiv-end
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[12:13]
+; GCN-IR-NEXT:  BB10_6: ; %udiv-end
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s4, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s5, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s4, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = sdiv i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
@@ -1895,16 +1877,16 @@ define amdgpu_kernel void @s_test_sdiv24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s0, s4
 ; GCN-NEXT:    s_ashr_i32 s4, s6, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
-; GCN-NEXT:    s_or_b32 s4, s4, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
 ; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-NEXT:    v_mul_f32_e32 v1, s7, v1
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mad_f32 v2, -v1, v0, s7
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v1
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1922,16 +1904,16 @@ define amdgpu_kernel void @s_test_sdiv24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s6, 30
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
-; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
 ; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v1, s7, v1
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-IR-NEXT:    v_mad_f32 v2, -v1, v0, s7
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v1
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1954,17 +1936,17 @@ define amdgpu_kernel void @s_test_sdiv24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s6
 ; GCN-NEXT:    s_mov_b32 s0, s4
 ; GCN-NEXT:    s_ashr_i32 s4, s6, 30
-; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mad_f32 v0, -v1, s8, v0
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; GCN-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s8
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    s_or_b32 s6, s4, 1
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, s8
+; GCN-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v1
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
-; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
@@ -1980,17 +1962,17 @@ define amdgpu_kernel void @s_test_sdiv24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s6
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s6, 30
-; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-IR-NEXT:    v_mad_f32 v0, -v1, s8, v0
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s8
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, s8
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v1
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/select-i1.ll b/llvm/test/CodeGen/AMDGPU/select-i1.ll
index 8e60aa1811f4..951449c463d0 100644
--- a/llvm/test/CodeGen/AMDGPU/select-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-i1.ll
@@ -1,4 +1,4 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
 
 ; FIXME: This should go in existing select.ll test, except the current testcase there is broken on GCN

diff --git a/llvm/test/CodeGen/AMDGPU/select-opt.ll b/llvm/test/CodeGen/AMDGPU/select-opt.ll
index 24df126e4caf..7054b013b0a6 100644
--- a/llvm/test/CodeGen/AMDGPU/select-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-opt.ll
@@ -7,8 +7,10 @@
 ; GCN-LABEL: {{^}}opt_select_i32_and_cmp_i32:
 ; GCN-DAG: v_cmp_ne_u32_e32 vcc,
 ; GCN-DAG: v_cmp_ne_u32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
-; GCN: s_and_b64 vcc, vcc, [[CMP1]]
-; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
+; GCN: s_and_b64 [[CMP1]], vcc, [[CMP1]]
+; GCN: s_cselect_b32 [[SRESULT:s[0-9]+]], {{s[0-9]+}}, {{s[0-9]+}}
+; GCN-NOT: [[SRESULT]]
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], [[SRESULT]]
 ; GCN-NOT: [[RESULT]]
 ; GCN: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @opt_select_i32_and_cmp_i32(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %x, i32 %y) #0 {
@@ -23,8 +25,10 @@ define amdgpu_kernel void @opt_select_i32_and_cmp_i32(i32 addrspace(1)* %out, i3
 ; GCN-LABEL: {{^}}opt_select_i32_and_cmp_f32:
 ; GCN-DAG: v_cmp_lg_f32_e32 vcc
 ; GCN-DAG: v_cmp_lg_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
-; GCN: s_and_b64 vcc, vcc, [[CMP1]]
-; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
+; GCN: s_and_b64 [[CMP1]], vcc, [[CMP1]]
+; GCN: s_cselect_b32 [[SRESULT:s[0-9]+]], {{s[0-9]+}}, {{s[0-9]+}}
+; GCN-NOT: [[SRESULT]]
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], [[SRESULT]]
 ; GCN-NOT: [[RESULT]]
 ; GCN: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @opt_select_i32_and_cmp_f32(i32 addrspace(1)* %out, float %a, float %b, float %c, i32 %x, i32 %y) #0 {
@@ -71,8 +75,10 @@ define amdgpu_kernel void @opt_select_i64_and_cmp_f32(i64 addrspace(1)* %out, fl
 ; GCN-LABEL: {{^}}opt_select_i32_or_cmp_i32:
 ; GCN-DAG: v_cmp_ne_u32_e32 vcc,
 ; GCN-DAG: v_cmp_ne_u32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
-; GCN: s_or_b64 vcc, vcc, [[CMP1]]
-; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
+; GCN: s_or_b64 [[CMP1]], vcc, [[CMP1]]
+; GCN: s_cselect_b32 [[SRESULT:s[0-9]+]], {{s[0-9]+}}, {{s[0-9]+}}
+; GCN-NOT: [[SRESULT]]
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], [[SRESULT]]
 ; GCN-NOT: [[RESULT]]
 ; GCN: buffer_store_dword [[RESULT]]
 ; GCN: s_endpgm
@@ -88,8 +94,10 @@ define amdgpu_kernel void @opt_select_i32_or_cmp_i32(i32 addrspace(1)* %out, i32
 ; GCN-LABEL: {{^}}opt_select_i32_or_cmp_f32:
 ; GCN-DAG: v_cmp_lg_f32_e32 vcc
 ; GCN-DAG: v_cmp_lg_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
-; GCN: s_or_b64 vcc, vcc, [[CMP1]]
-; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
+; GCN: s_or_b64 [[CMP1]], vcc, [[CMP1]]
+; GCN: s_cselect_b32 [[SRESULT:s[0-9]+]], {{s[0-9]+}}, {{s[0-9]+}}
+; GCN-NOT: [[SRESULT]]
+; GCN: v_mov_b32_e32 [[RESULT:v[0-9]+]], [[SRESULT]]
 ; GCN-NOT: [[RESULT]]
 ; GCN: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @opt_select_i32_or_cmp_f32(i32 addrspace(1)* %out, float %a, float %b, float %c, i32 %x, i32 %y) #0 {

diff --git a/llvm/test/CodeGen/AMDGPU/select-vectors.ll b/llvm/test/CodeGen/AMDGPU/select-vectors.ll
index 4c136d09c4ab..9e6d1a7cf76b 100644
--- a/llvm/test/CodeGen/AMDGPU/select-vectors.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-vectors.ll
@@ -66,8 +66,8 @@ define amdgpu_kernel void @v_select_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8
 }
 
 ; GCN-LABEL: {{^}}select_v4i8:
-; GCN: v_cndmask_b32
-; GCN-NOT: cndmask
+; GCN: s_cselect_b32
+; GCN-NOT: s_cselect_b32
 define amdgpu_kernel void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, <4 x i8> %b, i8 %c) #0 {
   %cmp = icmp eq i8 %c, 0
   %select = select i1 %cmp, <4 x i8> %a, <4 x i8> %b
@@ -79,11 +79,11 @@ define amdgpu_kernel void @select_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a,
 ; GFX89: s_load_dword
 ; GFX89: s_load_dword
 ; GFX89: s_load_dword
-; GFX89: v_cndmask_b32
-; GFX89-NOT: v_cndmask_b32
+; GFX89: s_cselect_b32
+; GFX89-NOT: s_cselect_b32
 
-; SI: v_cndmask_b32_e32
-; SI-NOT: v_cndmask_b32e
+; SI: s_cselect_b32
+; SI-NOT: s_cselect_b32
 define amdgpu_kernel void @select_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, <2 x i16> %b, i32 %c) #0 {
   %cmp = icmp eq i32 %c, 0
   %select = select i1 %cmp, <2 x i16> %a, <2 x i16> %b

diff --git a/llvm/test/CodeGen/AMDGPU/select64.ll b/llvm/test/CodeGen/AMDGPU/select64.ll
index 0a48227f5c50..9c492dcfb351 100644
--- a/llvm/test/CodeGen/AMDGPU/select64.ll
+++ b/llvm/test/CodeGen/AMDGPU/select64.ll
@@ -16,8 +16,8 @@ entry:
 }
 
 ; CHECK-LABEL: {{^}}select_trunc_i64:
-; CHECK: v_cndmask_b32
-; CHECK-NOT: v_cndmask_b32
+; CHECK: s_cselect_b32
+; CHECK-NOT: s_cselect_b32
 define amdgpu_kernel void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i64 %in) nounwind {
   %cmp = icmp ugt i32 %cond, 5
   %sel = select i1 %cmp, i64 0, i64 %in
@@ -27,8 +27,8 @@ define amdgpu_kernel void @select_trunc_i64(i32 addrspace(1)* %out, i32 %cond, i
 }
 
 ; CHECK-LABEL: {{^}}select_trunc_i64_2:
-; CHECK: v_cndmask_b32
-; CHECK-NOT: v_cndmask_b32
+; CHECK: s_cselect_b32
+; CHECK-NOT: s_cselect_b32
 define amdgpu_kernel void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 %a, i64 %b) nounwind {
   %cmp = icmp ugt i32 %cond, 5
   %sel = select i1 %cmp, i64 %a, i64 %b
@@ -38,8 +38,8 @@ define amdgpu_kernel void @select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond,
 }
 
 ; CHECK-LABEL: {{^}}v_select_trunc_i64_2:
-; CHECK: v_cndmask_b32
-; CHECK-NOT: v_cndmask_b32
+; CHECK: s_cselect_b32
+; CHECK-NOT: s_cselect_b32
 define amdgpu_kernel void @v_select_trunc_i64_2(i32 addrspace(1)* %out, i32 %cond, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
   %cmp = icmp ugt i32 %cond, 5
   %a = load i64, i64 addrspace(1)* %aptr, align 8

diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
index 663831f06f4b..833a94174828 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
@@ -15,9 +15,10 @@ define amdgpu_kernel void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32
 ; uses an SGPR (implicit vcc).
 
 ; GCN-LABEL: {{^}}sint_to_fp_i1_f64:
-; GCN-DAG: v_cmp_eq_u32_e64 vcc,
-; GCN-DAG: v_cndmask_b32_e32 v[[SEL:[0-9]+]], 0, v{{[0-9]+}}
+; GCN-DAG: s_cmp_eq_u32
+; GCN-DAG: s_cselect_b32 s[[SSEL:[0-9]+]], 0xbff00000, 0
 ; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; GCN-DAG: v_mov_b32_e32 v[[SEL:[0-9]+]], s[[SSEL]]
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
 ; GCN: s_endpgm
 define amdgpu_kernel void @sint_to_fp_i1_f64(double addrspace(1)* %out, i32 %in) {

diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index a0e16ae0cef6..eb35241f461e 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -6,7 +6,6 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_srem:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
-; GCN-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
@@ -18,99 +17,100 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    s_mov_b32 s4, s8
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GCN-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v0
-; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v3
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_mul_hi_u32 v5, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s2, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, s3, v0
-; GCN-NEXT:    v_mul_lo_u32 v6, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, v6
-; GCN-NEXT:    v_mul_lo_u32 v7, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v9, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
-; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
-; GCN-NEXT:    v_addc_u32_e64 v4, vcc, v3, v5, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v6, s2, v4
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
+; GCN-NEXT:    v_mul_lo_u32 v5, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s2, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
+; GCN-NEXT:    v_mul_lo_u32 v5, s2, v2
 ; GCN-NEXT:    v_mul_hi_u32 v7, s2, v0
 ; GCN-NEXT:    v_mul_lo_u32 v8, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
 ; GCN-NEXT:    v_mul_lo_u32 v7, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v8, v6
-; GCN-NEXT:    v_mul_lo_u32 v10, v0, v6
-; GCN-NEXT:    v_mul_hi_u32 v12, v0, v6
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_mul_lo_u32 v10, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v12, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v11, v0, v7
-; GCN-NEXT:    v_mul_hi_u32 v9, v4, v7
-; GCN-NEXT:    v_mul_lo_u32 v7, v4, v7
-; GCN-NEXT:    v_mul_hi_u32 v8, v4, v6
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v8, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, v11, v10
-; GCN-NEXT:    v_addc_u32_e32 v11, vcc, v2, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v4, v6
+; GCN-NEXT:    v_addc_u32_e32 v11, vcc, 0, v12, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, v10, v7
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v8, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v7, v4
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v2, v6, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v3, v6, s[0:1]
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v5, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v6, s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v7, s11, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s11, v3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
-; GCN-NEXT:    v_mul_lo_u32 v6, s11, v0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v5, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v1, s12, v1
 ; GCN-NEXT:    v_mul_hi_u32 v2, s12, v0
 ; GCN-NEXT:    v_mul_lo_u32 v3, s13, v0
 ; GCN-NEXT:    v_mul_lo_u32 v0, s12, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], s10, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s11, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s10, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[2:3], s12, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v5, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s12, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v5, s11
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, v5, v1, s[0:1]
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
@@ -119,7 +119,7 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
@@ -127,97 +127,91 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-LABEL: s_test_srem:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[0:1], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s0
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 32
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s1
+; GCN-IR-NEXT:    s_cmp_eq_u32 s1, 0
+; GCN-IR-NEXT:    s_cselect_b32 s10, s14, s8
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
+; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s14, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s14
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[12:13], -1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s16, s8, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_addc_u32 s17, s9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[16:17], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[6:7], s8
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    s_add_u32 s8, s2, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s9, s3, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[6:7], s16
+; GCN-IR-NEXT:    s_add_u32 s8, s0, -1
+; GCN-IR-NEXT:    s_addc_u32 s9, s1, -1
+; GCN-IR-NEXT:    s_not_b64 s[2:3], s[10:11]
+; GCN-IR-NEXT:    s_mov_b32 s15, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s2, s14
+; GCN-IR-NEXT:    s_addc_u32 s11, s3, s11
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_mov_b32 s3, 0
 ; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s9
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s8, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s2, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s3, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s2, s13, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[16:17], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_sub_u32 s2, s8, s16
+; GCN-IR-NEXT:    s_subb_u32 s2, s9, s17
+; GCN-IR-NEXT:    s_ashr_i32 s14, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s15, s14
+; GCN-IR-NEXT:    s_and_b32 s2, s14, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[14:15], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s16, s16, s18
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    s_subb_u32 s17, s17, s19
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[2:3]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:  BB0_4: ; %Flow6
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[12:13], 1
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[2:3], s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:  BB0_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_6: ; %Flow6
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB0_7: ; %udiv-end
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
+; GCN-IR-NEXT:  BB0_6: ; %udiv-end
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s1, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s0, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
@@ -490,17 +484,17 @@ define amdgpu_kernel void @s_test_srem23_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_or_b32 s1, s1, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -522,17 +516,17 @@ define amdgpu_kernel void @s_test_srem23_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-IR-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -561,17 +555,17 @@ define amdgpu_kernel void @s_test_srem24_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_or_b32 s1, s1, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -593,17 +587,17 @@ define amdgpu_kernel void @s_test_srem24_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-IR-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -686,17 +680,17 @@ define amdgpu_kernel void @s_test_srem25_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_or_b32 s1, s1, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 25
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -718,17 +712,17 @@ define amdgpu_kernel void @s_test_srem25_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-IR-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 25
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -757,17 +751,17 @@ define amdgpu_kernel void @s_test_srem31_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    s_or_b32 s1, s1, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 31
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -789,17 +783,17 @@ define amdgpu_kernel void @s_test_srem31_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_ashr_i32 s1, s1, 30
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[8:9], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-IR-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s1, v2
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 31
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
@@ -817,27 +811,28 @@ define amdgpu_kernel void @s_test_srem32_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_srem32_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s0, s[0:1], 0xe
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_load_dword s6, s[0:1], 0xe
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, -1
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s7
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
-; GCN-NEXT:    s_xor_b32 s1, s7, s0
-; GCN-NEXT:    s_ashr_i32 s1, s1, 30
-; GCN-NEXT:    s_or_b32 s1, s1, 1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s6
+; GCN-NEXT:    s_xor_b32 s0, s7, s6
+; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_or_b32 s8, s0, 1
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s8, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s6
 ; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s7, v0
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -846,27 +841,28 @@ define amdgpu_kernel void @s_test_srem32_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-LABEL: s_test_srem32_64:
 ; GCN-IR:       ; %bb.0:
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dword s0, s[0:1], 0xe
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s7
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s0
-; GCN-IR-NEXT:    s_xor_b32 s1, s7, s0
-; GCN-IR-NEXT:    s_ashr_i32 s1, s1, 30
-; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s6
+; GCN-IR-NEXT:    s_xor_b32 s0, s7, s6
+; GCN-IR-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-IR-NEXT:    s_or_b32 s8, s0, 1
 ; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s1
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-IR-NEXT:    s_cselect_b32 s0, s8, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s6
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s7, v0
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -882,124 +878,124 @@ define amdgpu_kernel void @s_test_srem32_64(i64 addrspace(1)* %out, i64 %x, i64
 define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_srem33_64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
-; GCN-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_mov_b32 s11, 0xf000
+; GCN-NEXT:    s_mov_b32 s10, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[10:11], 31
-; GCN-NEXT:    s_ashr_i64 s[4:5], s[0:1], 31
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[6:7], 31
+; GCN-NEXT:    s_ashr_i64 s[8:9], s[0:1], 31
 ; GCN-NEXT:    s_ashr_i32 s0, s1, 31
-; GCN-NEXT:    s_add_u32 s4, s4, s0
+; GCN-NEXT:    s_add_u32 s8, s8, s0
 ; GCN-NEXT:    s_mov_b32 s1, s0
-; GCN-NEXT:    s_addc_u32 s5, s5, s0
-; GCN-NEXT:    s_xor_b64 s[12:13], s[4:5], s[0:1]
+; GCN-NEXT:    s_addc_u32 s9, s9, s0
+; GCN-NEXT:    s_xor_b64 s[12:13], s[8:9], s[0:1]
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s12
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s13
-; GCN-NEXT:    s_sub_u32 s4, 0, s12
-; GCN-NEXT:    s_subb_u32 s5, 0, s13
-; GCN-NEXT:    s_ashr_i32 s10, s11, 31
+; GCN-NEXT:    s_sub_u32 s6, 0, s12
+; GCN-NEXT:    s_subb_u32 s8, 0, s13
+; GCN-NEXT:    s_mov_b32 s9, s5
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    s_mov_b32 s11, s10
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x2f800000, v0
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v2
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_hi_u32 v4, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, s4, v2
-; GCN-NEXT:    v_mul_lo_u32 v6, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v4, v0, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v9, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v7, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v8, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v5, v2, v5
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v8, v4
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v6, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v3
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v2, v4, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v5, s4, v3
-; GCN-NEXT:    v_mul_hi_u32 v6, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s9
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, s4, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s6, v0
+; GCN-NEXT:    v_mul_lo_u32 v2, s6, v1
+; GCN-NEXT:    v_mul_lo_u32 v5, s8, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s6, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v6, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, v1, v2
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
+; GCN-NEXT:    v_mul_lo_u32 v6, v1, v4
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
+; GCN-NEXT:    v_mul_lo_u32 v5, s6, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, s6, v0
+; GCN-NEXT:    v_mul_lo_u32 v8, s8, v0
+; GCN-NEXT:    s_mov_b32 s8, s4
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; GCN-NEXT:    v_mul_lo_u32 v7, s6, v0
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
 ; GCN-NEXT:    v_mul_lo_u32 v10, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v12, v0, v5
-; GCN-NEXT:    v_mul_hi_u32 v11, v0, v6
-; GCN-NEXT:    v_mul_hi_u32 v9, v3, v6
-; GCN-NEXT:    v_mul_lo_u32 v6, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v8, v3, v5
+; GCN-NEXT:    v_mul_hi_u32 v11, v0, v7
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v8, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, v11, v10
-; GCN-NEXT:    v_addc_u32_e32 v11, vcc, v7, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v3, v3, v5
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v10, v6
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v2, v5, s[0:1]
-; GCN-NEXT:    s_add_u32 s0, s2, s10
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; GCN-NEXT:    s_addc_u32 s1, s3, s10
-; GCN-NEXT:    s_xor_b64 s[14:15], s[0:1], s[10:11]
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v3, s14, v2
-; GCN-NEXT:    v_mul_hi_u32 v4, s14, v0
-; GCN-NEXT:    v_mul_hi_u32 v5, s14, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, s15, v2
-; GCN-NEXT:    v_mul_lo_u32 v2, s15, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
+; GCN-NEXT:    v_addc_u32_e32 v11, vcc, 0, v12, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, v10, v7
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    s_ashr_i32 s6, s7, 31
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
+; GCN-NEXT:    s_add_u32 s0, s2, s6
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    s_mov_b32 s7, s6
+; GCN-NEXT:    s_addc_u32 s1, s3, s6
+; GCN-NEXT:    s_xor_b64 s[14:15], s[0:1], s[6:7]
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, s14, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s14, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s14, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s15, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s15, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v5, s15, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s15, v0
-; GCN-NEXT:    s_mov_b32 s4, s8
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v1, s12, v1
 ; GCN-NEXT:    v_mul_hi_u32 v2, s12, v0
 ; GCN-NEXT:    v_mul_lo_u32 v3, s13, v0
 ; GCN-NEXT:    v_mul_lo_u32 v0, s12, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], s14, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s15, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s14, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[2:3], s12, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v5, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s12, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v5, s15
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, v5, v1, s[0:1]
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
@@ -1008,14 +1004,14 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s10, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s10, v1
-; GCN-NEXT:    v_mov_b32_e32 v2, s10
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s10, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, s6, v0
+; GCN-NEXT:    v_xor_b32_e32 v1, s6, v1
+; GCN-NEXT:    v_mov_b32_e32 v2, s6
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s6, v0
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem33_64:
@@ -1023,118 +1019,112 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
-; GCN-IR-NEXT:    s_ashr_i64 s[10:11], s[0:1], 31
-; GCN-IR-NEXT:    s_ashr_i32 s0, s1, 31
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 31
-; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[6:7], 31
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[0:1], 31
+; GCN-IR-NEXT:    s_ashr_i32 s10, s1, 31
+; GCN-IR-NEXT:    s_ashr_i32 s0, s7, 31
 ; GCN-IR-NEXT:    s_mov_b32 s1, s0
-; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[8:9], s[2:3]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[10:11], s[0:1]
-; GCN-IR-NEXT:    s_sub_u32 s6, s6, s2
-; GCN-IR-NEXT:    s_subb_u32 s7, s7, s2
-; GCN-IR-NEXT:    s_sub_u32 s8, s8, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s8
-; GCN-IR-NEXT:    s_subb_u32 s9, s9, s0
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[2:3], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s2, s2, s0
+; GCN-IR-NEXT:    s_subb_u32 s3, s3, s0
+; GCN-IR-NEXT:    s_sub_u32 s6, s6, s10
+; GCN-IR-NEXT:    s_subb_u32 s7, s7, s10
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[10:11]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[10:11]
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[10:11], s[12:13]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s12, s10, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s16, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_mov_b32 s13, 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[20:21], s[14:15], -1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[20:21], s[18:19]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s18, s10, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    s_addc_u32 s19, s11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[2:3], s10
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[18:19], s[2:3], s18
+; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
+; GCN-IR-NEXT:    s_not_b64 s[8:9], s[12:13]
+; GCN-IR-NEXT:    s_mov_b32 s17, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s16
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, s13
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  BB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s8, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s9, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s8, s15, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[18:19], s[18:19], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[16:17], s[14:15]
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s18
+; GCN-IR-NEXT:    s_subb_u32 s8, s11, s19
+; GCN-IR-NEXT:    s_ashr_i32 s16, s8, 31
+; GCN-IR-NEXT:    s_mov_b32 s17, s16
+; GCN-IR-NEXT:    s_and_b32 s8, s16, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[16:17], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s18, s18, s20
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    s_subb_u32 s19, s19, s21
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB8_3
+; GCN-IR-NEXT:  BB8_4: ; %Flow6
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
 ; GCN-IR-NEXT:    s_branch BB8_6
-; GCN-IR-NEXT:  BB8_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB8_7
 ; GCN-IR-NEXT:  BB8_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB8_6: ; %Flow6
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB8_7: ; %udiv-end
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, s8, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s8, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, s9, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, s8, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:  BB8_6: ; %udiv-end
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s6, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s7, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s6, v0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
@@ -1174,7 +1164,7 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v4, v4
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v1|
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
 ; GCN-NEXT:    v_mul_lo_u32 v0, v1, v0
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v2, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
@@ -1192,119 +1182,113 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_load_dword s0, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[2:3], 24
 ; GCN-IR-NEXT:    s_sext_i32_i16 s7, s0
-; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
-; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
-; GCN-IR-NEXT:    s_ashr_i32 s10, s7, 31
-; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 24
-; GCN-IR-NEXT:    s_mov_b32 s11, s10
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s6, s0, s2
-; GCN-IR-NEXT:    s_subb_u32 s7, s1, s2
-; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s8
-; GCN-IR-NEXT:    s_subb_u32 s9, s9, s10
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    s_ashr_i32 s0, s3, 31
+; GCN-IR-NEXT:    s_ashr_i32 s12, s7, 31
+; GCN-IR-NEXT:    s_mov_b32 s1, s0
+; GCN-IR-NEXT:    s_ashr_i64 s[10:11], s[6:7], 24
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[8:9], s[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[10:11], s[12:13]
+; GCN-IR-NEXT:    s_sub_u32 s2, s2, s0
+; GCN-IR-NEXT:    s_subb_u32 s3, s3, s0
+; GCN-IR-NEXT:    s_sub_u32 s6, s6, s12
+; GCN-IR-NEXT:    s_subb_u32 s7, s7, s12
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[10:11]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[10:11]
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[10:11], s[12:13]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s12, s10, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s16, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_mov_b32 s13, 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[20:21], s[14:15], -1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[20:21], s[18:19]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s18, s10, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    s_addc_u32 s19, s11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[2:3], s10
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[18:19], s[2:3], s18
+; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
+; GCN-IR-NEXT:    s_not_b64 s[8:9], s[12:13]
+; GCN-IR-NEXT:    s_mov_b32 s17, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s16
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, s13
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  BB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s8, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s9, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s8, s15, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[18:19], s[18:19], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[16:17], s[14:15]
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s18
+; GCN-IR-NEXT:    s_subb_u32 s8, s11, s19
+; GCN-IR-NEXT:    s_ashr_i32 s16, s8, 31
+; GCN-IR-NEXT:    s_mov_b32 s17, s16
+; GCN-IR-NEXT:    s_and_b32 s8, s16, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[16:17], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s18, s18, s20
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    s_subb_u32 s19, s19, s21
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_3
+; GCN-IR-NEXT:  BB9_4: ; %Flow3
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[14:15], 1
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
 ; GCN-IR-NEXT:    s_branch BB9_6
-; GCN-IR-NEXT:  BB9_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB9_7
 ; GCN-IR-NEXT:  BB9_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB9_6: ; %Flow3
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB9_7: ; %udiv-end
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, s8, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s8, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, s9, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, s8, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:  BB9_6: ; %udiv-end
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s6, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s7, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s6, v0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
 ; GCN-IR-NEXT:    buffer_store_dword v0, off, s[4:7], 0
@@ -1404,25 +1388,25 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v0, s8, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], 24, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s9
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s8, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v5
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s8, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s9, v5
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[2:3], s8, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v5, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s8, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, 0, v1, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
@@ -1430,104 +1414,98 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_ashr_i32 s0, s7, 31
-; GCN-IR-NEXT:    s_mov_b32 s1, s0
-; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[6:7], s[0:1]
-; GCN-IR-NEXT:    s_sub_u32 s2, s2, s0
-; GCN-IR-NEXT:    s_subb_u32 s3, s3, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
-; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s6
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[6:7], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_ashr_i32 s4, s3, 31
+; GCN-IR-NEXT:    s_mov_b32 s5, s4
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
+; GCN-IR-NEXT:    s_sub_u32 s2, s2, s4
+; GCN-IR-NEXT:    s_subb_u32 s3, s3, s4
+; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
+; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s8, s4, s5
+; GCN-IR-NEXT:    s_add_u32 s6, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s12, s6, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    s_addc_u32 s13, s7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], 24, s6
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], 24, s12
 ; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  BB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s6, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s2, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s3, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s4, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s4, s6, s14
+; GCN-IR-NEXT:    s_subb_u32 s4, s7, s15
+; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB10_3
+; GCN-IR-NEXT:  BB10_4: ; %Flow5
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-IR-NEXT:    s_branch BB10_6
-; GCN-IR-NEXT:  BB10_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB10_7
 ; GCN-IR-NEXT:  BB10_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB10_6: ; %Flow5
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB10_7: ; %udiv-end
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[10:11]
+; GCN-IR-NEXT:  BB10_6: ; %udiv-end
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v1
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = srem i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
@@ -2076,17 +2054,17 @@ define amdgpu_kernel void @s_test_srem24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
 ; GCN-NEXT:    s_ashr_i32 s5, s4, 30
 ; GCN-NEXT:    s_or_b32 s5, s5, 1
-; GCN-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
 ; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_mul_f32_e32 v1, s6, v1
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mad_f32 v2, -v1, v0, s6
 ; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v0|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v2|, |v0|
+; GCN-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GCN-NEXT:    s_cselect_b32 s5, s5, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s5, v1
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
@@ -2103,17 +2081,17 @@ define amdgpu_kernel void @s_test_srem24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
 ; GCN-IR-NEXT:    s_ashr_i32 s5, s4, 30
 ; GCN-IR-NEXT:    s_or_b32 s5, s5, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
 ; GCN-IR-NEXT:    v_mul_f32_e32 v1, s6, v1
 ; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-IR-NEXT:    v_mad_f32 v2, -v1, v0, s6
 ; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v0|
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v2|, |v0|
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GCN-IR-NEXT:    s_cselect_b32 s5, s5, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s5, v1
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
@@ -2130,23 +2108,23 @@ define amdgpu_kernel void @s_test_srem24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-LABEL: s_test_srem24_k_den_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_mov_b32 s8, 0x46b6fe00
+; GCN-NEXT:    s_mov_b32 s1, 0x46b6fe00
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i64 s[6:7], s[6:7], 40
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s6
 ; GCN-NEXT:    s_ashr_i32 s0, s6, 30
-; GCN-NEXT:    s_or_b32 s0, s0, 1
-; GCN-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x38331158, v0
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v0, -v2, s8, v0
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s8
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT:    s_or_b32 s7, s0, 1
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mad_f32 v0, -v1, s1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v0|, s1
+; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    s_cselect_b32 s0, s7, 0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v1
 ; GCN-NEXT:    s_movk_i32 s0, 0x5b7f
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-NEXT:    s_mov_b32 s0, s4
 ; GCN-NEXT:    s_mov_b32 s1, s5
@@ -2159,23 +2137,23 @@ define amdgpu_kernel void @s_test_srem24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-IR-LABEL: s_test_srem24_k_den_i64:
 ; GCN-IR:       ; %bb.0:
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_mov_b32 s8, 0x46b6fe00
+; GCN-IR-NEXT:    s_mov_b32 s1, 0x46b6fe00
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[6:7], 40
 ; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s6
 ; GCN-IR-NEXT:    s_ashr_i32 s0, s6, 30
-; GCN-IR-NEXT:    s_or_b32 s0, s0, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, 0x38331158, v0
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mad_f32 v0, -v2, s8, v0
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s8
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-IR-NEXT:    s_or_b32 s7, s0, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v0, -v1, s1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v0|, s1
+; GCN-IR-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-IR-NEXT:    s_cselect_b32 s0, s7, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s0, v1
 ; GCN-IR-NEXT:    s_movk_i32 s0, 0x5b7f
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-IR-NEXT:    s_mov_b32 s0, s4
 ; GCN-IR-NEXT:    s_mov_b32 s1, s5

diff  --git a/llvm/test/CodeGen/AMDGPU/trunc.ll b/llvm/test/CodeGen/AMDGPU/trunc.ll
index 7dc43cf485df..b2d144e41422 100644
--- a/llvm/test/CodeGen/AMDGPU/trunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/trunc.ll
@@ -98,7 +98,8 @@ define amdgpu_kernel void @sgpr_trunc_i32_to_i1(i32 addrspace(1)* %out, i32 %a)
 ; VI: s_load_dwordx2 s{{\[}}[[SLO:[0-9]+]]:{{[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0x4c
 ; GCN: s_and_b32 [[MASKED:s[0-9]+]], 1, s[[SLO]]
 ; GCN: v_cmp_eq_u32_e64 s{{\[}}[[VLO:[0-9]+]]:[[VHI:[0-9]+]]], [[MASKED]], 1{{$}}
-; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, s{{\[}}[[VLO]]:[[VHI]]]
+; GCN: s_cmp_lg_u64 s{{\[}}[[VLO]]:[[VHI]]], 0
+; GCN: s_cselect_b32 {{s[0-9]+}}, 63, -12
 define amdgpu_kernel void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, [8 x i32], i64 %x) {
   %trunc = trunc i64 %x to i1
   %sel = select i1 %trunc, i32 63, i32 -12

diff  --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 6b29f2962043..7ebe78c16cb7 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -5,121 +5,121 @@
 define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_udiv_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
-; GCN-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
 ; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GCN-NEXT:    s_sub_u32 s4, 0, s2
-; GCN-NEXT:    s_subb_u32 s5, 0, s3
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s12
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s13
+; GCN-NEXT:    s_sub_u32 s2, 0, s12
+; GCN-NEXT:    s_subb_u32 s3, 0, s13
+; GCN-NEXT:    s_mov_b32 s4, s8
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GCN-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v0
-; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v3
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_mul_hi_u32 v5, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s4, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v6, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, v6
-; GCN-NEXT:    v_mul_lo_u32 v7, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v9, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
-; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
+; GCN-NEXT:    v_mul_lo_u32 v5, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s2, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
+; GCN-NEXT:    v_mul_lo_u32 v5, s2, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v8, s3, v0
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; GCN-NEXT:    v_mul_lo_u32 v7, s2, v0
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
-; GCN-NEXT:    v_addc_u32_e64 v4, vcc, v3, v5, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v6, s4, v4
-; GCN-NEXT:    v_mul_hi_u32 v7, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s9
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_mul_lo_u32 v7, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v8, v6
-; GCN-NEXT:    v_mul_lo_u32 v10, v0, v6
-; GCN-NEXT:    v_mul_hi_u32 v12, v0, v6
+; GCN-NEXT:    v_mul_lo_u32 v10, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v12, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v11, v0, v7
-; GCN-NEXT:    v_mul_hi_u32 v9, v4, v7
-; GCN-NEXT:    v_mul_lo_u32 v7, v4, v7
-; GCN-NEXT:    v_mul_hi_u32 v8, v4, v6
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v8, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, v11, v10
-; GCN-NEXT:    v_addc_u32_e32 v11, vcc, v2, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v4, v6
+; GCN-NEXT:    v_addc_u32_e32 v11, vcc, 0, v12, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, v10, v7
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v8, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v7, v4
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v2, v6, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v3, v6, s[0:1]
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v5, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v6, s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v7, s11, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s11, v3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
-; GCN-NEXT:    v_mul_lo_u32 v6, s11, v0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    s_mov_b32 s4, s8
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v5, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s3, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, s12, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s12, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s13, v0
+; GCN-NEXT:    v_mov_b32_e32 v5, s13
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v3, s12, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s11, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s10, v3
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s2, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
+; GCN-NEXT:    v_sub_i32_e64 v3, s[0:1], s10, v3
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s12, v3
+; GCN-NEXT:    v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v6, s11
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v6, v2, s[0:1]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v3
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@@ -128,93 +128,87 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-LABEL: s_test_udiv_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[0:1], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s1
+; GCN-IR-NEXT:    s_cmp_eq_u32 s1, 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s8, s12, s8
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s12, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s8, s12
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT:    s_mov_b32 s9, 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[14:15], -1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s14, s10, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    s_addc_u32 s15, s11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[6:7], s10
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
-; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[6:7], s14
+; GCN-IR-NEXT:    s_add_u32 s6, s0, -1
+; GCN-IR-NEXT:    s_addc_u32 s7, s1, -1
+; GCN-IR-NEXT:    s_not_b64 s[2:3], s[8:9]
+; GCN-IR-NEXT:    s_mov_b32 s13, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s2, s12
+; GCN-IR-NEXT:    s_addc_u32 s9, s3, s9
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_mov_b32 s3, 0
 ; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s6, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s2, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s3, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s2, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s2, s6, s14
+; GCN-IR-NEXT:    s_subb_u32 s2, s7, s15
+; GCN-IR-NEXT:    s_ashr_i32 s12, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s2, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[12:13], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[2:3]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:  BB0_4: ; %Flow6
+; GCN-IR-NEXT:    s_lshl_b64 s[0:1], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:  BB0_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_6: ; %Flow6
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB0_7: ; %udiv-end
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:  BB0_6: ; %udiv-end
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@@ -801,24 +795,24 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    v_subb_u32_e32 v4, vcc, 0, v4, vcc
 ; GCN-NEXT:    v_sub_i32_e32 v5, vcc, v3, v0
 ; GCN-NEXT:    v_subbrev_u32_e32 v6, vcc, 0, v4, vcc
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, v0, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v6
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, 2, v1
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v2, vcc
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, 1, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], v3, v0
 ; GCN-NEXT:    v_addc_u32_e32 v9, vcc, 0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v0, -1, v0, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v5, v8, v6, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, v5, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v9, v7, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v1, s[0:1]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, v0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v4
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v0, -1, v0, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v5, v8, v6, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v5, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, v9, v7, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
@@ -838,93 +832,87 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_and_b32 s0, s2, s9
 ; GCN-IR-NEXT:    s_and_b32 s3, s7, s8
 ; GCN-IR-NEXT:    s_and_b32 s2, s6, s9
-; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], 24
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
 ; GCN-IR-NEXT:    s_lshr_b64 s[6:7], s[0:1], 24
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], 24
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    s_mov_b64 s[0:1], 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
+; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s8, s8, s10
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s12, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s8, s12
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT:    s_mov_b32 s9, 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[14:15], -1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s14, s10, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    s_addc_u32 s15, s11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[6:7], s10
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[6:7], s14
 ; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_not_b64 s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_mov_b32 s13, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s0, s12
+; GCN-IR-NEXT:    s_addc_u32 s9, s1, s9
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_mov_b32 s1, 0
 ; GCN-IR-NEXT:  BB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s6, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s2, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s3, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s0, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[0:1]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s0, s6, s14
+; GCN-IR-NEXT:    s_subb_u32 s0, s7, s15
+; GCN-IR-NEXT:    s_ashr_i32 s12, s0, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s0, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB7_3
+; GCN-IR-NEXT:  BB7_4: ; %Flow3
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-IR-NEXT:    s_branch BB7_6
-; GCN-IR-NEXT:  BB7_4:
+; GCN-IR-NEXT:  BB7_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB7_7
-; GCN-IR-NEXT:  BB7_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB7_6: ; %Flow3
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB7_7: ; %udiv-end
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:  BB7_6: ; %udiv-end
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
@@ -1020,31 +1008,31 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v3, s6, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 24, v3
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s6, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s7, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s6, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s7, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, 0, v2, vcc
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
+; GCN-NEXT:    v_sub_i32_e64 v3, s[0:1], 24, v3
+; GCN-NEXT:    v_subb_u32_e64 v4, vcc, v4, v5, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s6, v3
+; GCN-NEXT:    v_subbrev_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s7, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s6, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s7, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, 0, v2, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v4
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s7, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s6, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s7, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
@@ -1052,84 +1040,78 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ;
 ; GCN-IR-LABEL: s_test_udiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[2:3], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
+; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s8, s4, s5
+; GCN-IR-NEXT:    s_add_u32 s6, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s12, s6, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    s_addc_u32 s13, s7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], 24, s6
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_add_u32 s2, s6, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s3, s7, -1
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], 24, s12
+; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  BB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s2, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s7, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s4, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s4, s6, s14
+; GCN-IR-NEXT:    s_subb_u32 s4, s7, s15
+; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB8_3
+; GCN-IR-NEXT:  BB8_4: ; %Flow5
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[4:5], s[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-IR-NEXT:    s_branch BB8_6
-; GCN-IR-NEXT:  BB8_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB8_7
 ; GCN-IR-NEXT:  BB8_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB8_6: ; %Flow5
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB8_7: ; %udiv-end
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[10:11]
+; GCN-IR-NEXT:  BB8_6: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = udiv i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
@@ -1504,8 +1486,8 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_hi_u32 v3, v0, 24
 ; GCN-NEXT:    v_mul_lo_u32 v4, v0, 24
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s10, v4
 ; GCN-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s10, v4
 ; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
 ; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, 24, v4
 ; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
@@ -1516,98 +1498,93 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
 ; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], 23, v4
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v7, v5, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, 23, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
+; GCN-NEXT:    v_cndmask_b32_e32 v2, -1, v4, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v8, v6, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_udiv_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 59, v2
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[2:3], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
-; GCN-IR-NEXT:    s_cbranch_vccz BB11_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
+; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s8, s4, s5
+; GCN-IR-NEXT:    s_sub_u32 s6, 59, s8
+; GCN-IR-NEXT:    s_subb_u32 s7, 0, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB11_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s10, s6, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s6
+; GCN-IR-NEXT:    s_cbranch_vccz BB11_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v3
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffc4, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], 0, -1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
+; GCN-IR-NEXT:    s_add_u32 s2, s8, 0xffffffc4
+; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  BB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 23, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 24, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s11
+; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
+; GCN-IR-NEXT:    s_and_b32 s8, s8, 24
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s8
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
+; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB11_3
+; GCN-IR-NEXT:  BB11_4: ; %Flow5
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[6:7], 1
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[4:5], s[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-IR-NEXT:    s_branch BB11_6
-; GCN-IR-NEXT:  BB11_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB11_7
 ; GCN-IR-NEXT:  BB11_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB11_6: ; %Flow5
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB11_7: ; %udiv-end
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[10:11]
+; GCN-IR-NEXT:  BB11_6: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = udiv i64 %x, 24
   store i64 %result, i64 addrspace(1)* %out

diff  --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
index 75d6eb57cb0b..97a6d0436ee8 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
@@ -75,8 +75,9 @@ define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)
 ; uses an SGPR (implicit vcc).
 
 ; GCN-LABEL: {{^}}uint_to_fp_i1_to_f64:
-; GCN-DAG: v_cmp_eq_u32_e64 vcc
-; GCN-DAG: v_cndmask_b32_e32 v[[SEL:[0-9]+]], 0, v{{[0-9]+}}
+; GCN-DAG: s_cmp_eq_u32
+; GCN-DAG: s_cselect_b32 s[[SSEL:[0-9]+]], 0x3ff00000, 0
+; GCN-DAG: v_mov_b32_e32 v[[SEL:[0-9]+]], s[[SSEL]]
 ; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
 ; GCN: s_endpgm

diff  --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 0aac641dae97..ef492be02506 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -6,7 +6,6 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-LABEL: s_test_urem_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
-; GCN-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
@@ -18,99 +17,100 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    s_mov_b32 s4, s8
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
-; GCN-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v0
-; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v3
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_mul_hi_u32 v5, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s2, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, s3, v0
-; GCN-NEXT:    v_mul_lo_u32 v6, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, v6
-; GCN-NEXT:    v_mul_lo_u32 v7, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v9, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
-; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
-; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
-; GCN-NEXT:    v_addc_u32_e64 v4, vcc, v3, v5, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v6, s2, v4
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
+; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
+; GCN-NEXT:    v_mul_lo_u32 v5, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s2, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
+; GCN-NEXT:    v_mul_lo_u32 v5, s2, v2
 ; GCN-NEXT:    v_mul_hi_u32 v7, s2, v0
 ; GCN-NEXT:    v_mul_lo_u32 v8, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
 ; GCN-NEXT:    v_mul_lo_u32 v7, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v8, v6
-; GCN-NEXT:    v_mul_lo_u32 v10, v0, v6
-; GCN-NEXT:    v_mul_hi_u32 v12, v0, v6
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_mul_lo_u32 v10, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v12, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v11, v0, v7
-; GCN-NEXT:    v_mul_hi_u32 v9, v4, v7
-; GCN-NEXT:    v_mul_lo_u32 v7, v4, v7
-; GCN-NEXT:    v_mul_hi_u32 v8, v4, v6
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v8, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, v11, v10
-; GCN-NEXT:    v_addc_u32_e32 v11, vcc, v2, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v4, v6
+; GCN-NEXT:    v_addc_u32_e32 v11, vcc, 0, v12, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, v10, v7
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v8, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v7, v4
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v2, v6, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v3, v6, s[0:1]
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v5, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v6, s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v7, s11, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s11, v3
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
-; GCN-NEXT:    v_mul_lo_u32 v6, s11, v0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v5, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v1, s12, v1
 ; GCN-NEXT:    v_mul_hi_u32 v2, s12, v0
 ; GCN-NEXT:    v_mul_lo_u32 v3, s13, v0
 ; GCN-NEXT:    v_mul_lo_u32 v0, s12, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], s10, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s11, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s10, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[2:3], s12, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v5, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s12, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v5, s11
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, v5, v1, s[0:1]
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
@@ -119,7 +119,7 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
@@ -127,97 +127,91 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-LABEL: s_test_urem_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[0:1], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s0
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 32
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s1
+; GCN-IR-NEXT:    s_cmp_eq_u32 s1, 0
+; GCN-IR-NEXT:    s_cselect_b32 s10, s14, s8
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
+; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
+; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_cselect_b32 s14, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s14
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[12:13], -1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s16, s8, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_addc_u32 s17, s9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[16:17], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[6:7], s8
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    s_add_u32 s8, s2, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s9, s3, -1
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[6:7], s16
+; GCN-IR-NEXT:    s_add_u32 s8, s0, -1
+; GCN-IR-NEXT:    s_addc_u32 s9, s1, -1
+; GCN-IR-NEXT:    s_not_b64 s[2:3], s[10:11]
+; GCN-IR-NEXT:    s_mov_b32 s15, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s2, s14
+; GCN-IR-NEXT:    s_addc_u32 s11, s3, s11
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_mov_b32 s3, 0
 ; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s9
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s8, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s2, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s3, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s2, s13, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[16:17], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_sub_u32 s2, s8, s16
+; GCN-IR-NEXT:    s_subb_u32 s2, s9, s17
+; GCN-IR-NEXT:    s_ashr_i32 s14, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s15, s14
+; GCN-IR-NEXT:    s_and_b32 s2, s14, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[14:15], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s16, s16, s18
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    s_subb_u32 s17, s17, s19
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[2:3]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:  BB0_4: ; %Flow6
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[12:13], 1
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[2:3], s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:  BB0_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_6: ; %Flow6
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB0_7: ; %udiv-end
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
+; GCN-IR-NEXT:  BB0_6: ; %udiv-end
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s1, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s0, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
@@ -826,25 +820,25 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v0, s6, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_sub_i32_e64 v0, s[0:1], 24, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s7
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s6, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s7, v5
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s6, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s6, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s7, v5
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[0:1]
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[2:3], s6, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v5, vcc, 0, v2, s[2:3]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s7, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s6, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s7, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
+; GCN-NEXT:    v_subb_u32_e64 v2, vcc, v2, v3, s[2:3]
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s6, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v2, vcc, 0, v2, vcc
+; GCN-NEXT:    v_subb_u32_e64 v1, vcc, 0, v1, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[2:3], 0, v6
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s7, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s6, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
@@ -852,99 +846,93 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_urem_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[2:3], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
-; GCN-IR-NEXT:    s_cbranch_vccz BB6_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
+; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s8, s4, s5
+; GCN-IR-NEXT:    s_add_u32 s6, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB6_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s12, s6, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    s_addc_u32 s13, s7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], 24, s6
+; GCN-IR-NEXT:    s_cbranch_vccz BB6_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_add_u32 s2, s6, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s3, s7, -1
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], 24, s12
+; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  BB6_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s2, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s7, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s4, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s4, s6, s14
+; GCN-IR-NEXT:    s_subb_u32 s4, s7, s15
+; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB6_3
+; GCN-IR-NEXT:  BB6_4: ; %Flow5
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-IR-NEXT:    s_branch BB6_6
-; GCN-IR-NEXT:  BB6_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB6_7
 ; GCN-IR-NEXT:  BB6_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB6_6: ; %Flow5
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB6_7: ; %udiv-end
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, s6, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, s7, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, s6, v0
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[10:11]
+; GCN-IR-NEXT:  BB6_6: ; %udiv-end
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
@@ -966,12 +954,12 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    v_mul_hi_u32 v2, v0, s2
 ; GCN-NEXT:    v_mul_lo_u32 v3, v1, s2
 ; GCN-NEXT:    v_mul_lo_u32 v4, v0, s2
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, v0, v2
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
@@ -984,7 +972,7 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
+; GCN-NEXT:    s_mov_b32 s4, s8
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v3, v4, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
@@ -996,7 +984,7 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v5, v2, s2
 ; GCN-NEXT:    v_mul_lo_u32 v6, v0, s2
 ; GCN-NEXT:    v_subrev_i32_e32 v4, vcc, v0, v4
-; GCN-NEXT:    s_mov_b32 s9, s5
+; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
 ; GCN-NEXT:    v_mul_lo_u32 v5, v0, v4
 ; GCN-NEXT:    v_mul_hi_u32 v9, v0, v6
@@ -1016,15 +1004,15 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v4, s[0:1]
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s6, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s6, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s6, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s7, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s7, v1
+; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v4, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v5, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s7, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, s11, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
@@ -1034,8 +1022,8 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v1, v1, 24
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, 24
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT:    v_mov_b32_e32 v2, s7
+; GCN-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s10, v0
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, 24, v0
 ; GCN-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v1, vcc
@@ -1045,104 +1033,99 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
 ; GCN-NEXT:    v_cndmask_b32_e32 v6, -1, v6, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], 23, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v5, -1, v5, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, 23, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v4, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_urem_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 59, v2
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[2:3], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
-; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
-; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
+; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
+; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
+; GCN-IR-NEXT:    s_cselect_b32 s6, s4, s5
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s6
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_sub_i32 s7, 63, s8
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s7
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v3
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffc4, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], 0, -1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s10
+; GCN-IR-NEXT:    s_add_u32 s6, s6, 0xffffffc4
+; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  BB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 23, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 24, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[0:1], v6, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1]
+; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s12
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s13
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b32 s10, s10, 24
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s10
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-IR-NEXT:    s_add_u32 s6, s6, 1
+; GCN-IR-NEXT:    s_addc_u32 s7, s7, 0
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_cbranch_vccz BB7_3
+; GCN-IR-NEXT:  BB7_4: ; %Flow5
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[8:9], 1
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-IR-NEXT:    s_branch BB7_6
-; GCN-IR-NEXT:  BB7_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
-; GCN-IR-NEXT:    s_branch BB7_7
 ; GCN-IR-NEXT:  BB7_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB7_6: ; %Flow5
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB7_7: ; %udiv-end
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[10:11]
+; GCN-IR-NEXT:  BB7_6: ; %udiv-end
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, v0, 24
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, 24
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, 24
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 %x, 24
   store i64 %result, i64 addrspace(1)* %out

diff  --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
index 28fb550c44af..de326d50e166 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
@@ -13,13 +13,14 @@ target datalayout = "A5"
 ; GCN-ALLOCA-COUNT-4: buffer_store_dword
 ; GCN-ALLOCA:         buffer_load_dword
 
+; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
 ; GCN-PROMOTE: v_cmp_eq_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 1
+; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
-; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC2:[^,]+]], s{{[0-9]+}}, 2
-; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], [[CC2]]
-; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC3:[^,]+]], s{{[0-9]+}}, 3
-; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], [[CC3]]
-
+; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
+; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc
 ; GCN-PROMOTE: ScratchSize: 0
 
 define amdgpu_kernel void @vector_read_alloca_bitcast(i32 addrspace(1)* %out, i32 %index) {
@@ -320,12 +321,14 @@ entry:
 ; GCN-ALLOCA-COUNT-4: buffer_store_dword
 ; GCN-ALLOCA:         buffer_load_dword
 
+; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
 ; GCN-PROMOTE: v_cmp_eq_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 1
+; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
 ; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
-; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC2:[^,]+]], s{{[0-9]+}}, 2
-; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], [[CC2]]
-; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC3:[^,]+]], s{{[0-9]+}}, 3
-; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], [[CC3]]
+; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
+; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc
 
 ; GCN-PROMOTE: ScratchSize: 0
 

diff  --git a/llvm/test/CodeGen/AMDGPU/vselect.ll b/llvm/test/CodeGen/AMDGPU/vselect.ll
index 02ffd30be5fd..41f03ee0e37c 100644
--- a/llvm/test/CodeGen/AMDGPU/vselect.ll
+++ b/llvm/test/CodeGen/AMDGPU/vselect.ll
@@ -7,10 +7,10 @@
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Z
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Y
 
-; SI: v_cmp_gt_i32_e32 vcc
-; SI: v_cndmask_b32_e32
-; SI: v_cmp_gt_i32_e32 vcc
-; SI: v_cndmask_b32_e32
+; SI: s_cmp_gt_i32
+; SI: s_cselect_b32
+; SI: s_cmp_gt_i32
+; SI: s_cselect_b32
 
 define amdgpu_kernel void @test_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1, <2 x i32> %val) {
 entry:
@@ -50,10 +50,10 @@ entry:
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Z
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Y
 
-; SI: v_cndmask_b32_e32
-; SI: v_cndmask_b32_e32
-; SI: v_cndmask_b32_e32
-; SI: v_cndmask_b32_e32
+; SI: s_cselect_b32
+; SI: s_cselect_b32
+; SI: s_cselect_b32
+; SI: s_cselect_b32
 
 define amdgpu_kernel void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1, <4 x i32> %val) {
 entry:

diff  --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index d9e1f0f00934..0b0b9a30f113 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -345,9 +345,9 @@ bb:
 ; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
 ; GFX1064: v_add_co_u32_e64 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
 ; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
-; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX1064: v_subrev_co_ci_u32_e64 v{{[0-9]+}}, s[{{[0-9:]+}}], {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}}
-; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, vcc{{$}}
+; GFX1064: v_sub_co_u32_e64 v{{[0-9]+}}, s[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}
+; GFX1064: v_subrev_co_ci_u32_e64 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, s[{{[0-9:]+}}]
+; GFX1064: v_sub_co_ci_u32_e64 v{{[0-9]+}}, s[{{[0-9:]+}}], {{[vs][0-9]+}}, v{{[0-9]+}}, s[{{[0-9:]+}}]
 define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 {
 bb:
   %tmp = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1


        

