[llvm] 778351d - Revert "[AMDGPU] Enable compare operations to be selected by divergence"

Matt Arsenault via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jun 24 08:21:39 PDT 2020


Author: Matt Arsenault
Date: 2020-06-24T11:21:30-04:00
New Revision: 778351df777b89641cadf038b17e416b0bbf31ab

URL: https://github.com/llvm/llvm-project/commit/778351df777b89641cadf038b17e416b0bbf31ab
DIFF: https://github.com/llvm/llvm-project/commit/778351df777b89641cadf038b17e416b0bbf31ab.diff

LOG: Revert "[AMDGPU] Enable compare operations to be selected by divergence"

This reverts commit 521ac0b5cea02f629d035f807460affbb65ae7ad.

Reported to break thousands of piglit tests.
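
For context, a minimal sketch of what the reverted change affected (hypothetical IR, not taken from this commit). The patch selected compare instructions by DAG divergence: a setcc with uniform operands (e.g. scalar kernel arguments) was selected as an SALU compare (s_cmp_* writing SCC), while a divergent compare (e.g. one depending on the workitem id) became a VALU compare (v_cmp_* writing a lane mask). The restored si_setcc_uniform PatFrag is more conservative, using the scalar form only when every use of the setcc is a copy into SCC.

    ; Hypothetical kernel; names are illustrative only.
    declare i32 @llvm.amdgcn.workitem.id.x()

    define amdgpu_kernel void @cmp_sketch(i32 addrspace(1)* %out, i32 %a, i32 %b) {
      %uni = icmp ne i32 %a, %b    ; uniform: same value in every lane
      %tid = call i32 @llvm.amdgcn.workitem.id.x()
      %div = icmp ne i32 %tid, %b  ; divergent: varies per lane
      %c   = and i1 %uni, %div
      %sel = select i1 %c, i32 %a, i32 %b
      store i32 %sel, i32 addrspace(1)* %out
      ret void
    }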

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
    llvm/lib/Target/AMDGPU/SIInstrInfo.h
    llvm/lib/Target/AMDGPU/SIInstrInfo.td
    llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
    llvm/test/CodeGen/AMDGPU/addrspacecast.ll
    llvm/test/CodeGen/AMDGPU/amdgcn.private-memory.ll
    llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
    llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
    llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
    llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
    llvm/test/CodeGen/AMDGPU/ctlz.ll
    llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
    llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
    llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
    llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
    llvm/test/CodeGen/AMDGPU/fshl.ll
    llvm/test/CodeGen/AMDGPU/fshr.ll
    llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll
    llvm/test/CodeGen/AMDGPU/icmp64.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
    llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
    llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
    llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
    llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
    llvm/test/CodeGen/AMDGPU/loop_break.ll
    llvm/test/CodeGen/AMDGPU/min.ll
    llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
    llvm/test/CodeGen/AMDGPU/or.ll
    llvm/test/CodeGen/AMDGPU/sad.ll
    llvm/test/CodeGen/AMDGPU/saddo.ll
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
    llvm/test/CodeGen/AMDGPU/select-opt.ll
    llvm/test/CodeGen/AMDGPU/select-vectors.ll
    llvm/test/CodeGen/AMDGPU/selectcc-opt.ll
    llvm/test/CodeGen/AMDGPU/selectcc.ll
    llvm/test/CodeGen/AMDGPU/setcc-opt.ll
    llvm/test/CodeGen/AMDGPU/setcc.ll
    llvm/test/CodeGen/AMDGPU/setcc64.ll
    llvm/test/CodeGen/AMDGPU/shift-i128.ll
    llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
    llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll
    llvm/test/CodeGen/AMDGPU/sign_extend.ll
    llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
    llvm/test/CodeGen/AMDGPU/sint_to_fp.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/udivrem.ll
    llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
    llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
    llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll
    llvm/test/CodeGen/AMDGPU/v_cndmask.ll
    llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
    llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
    llvm/test/CodeGen/AMDGPU/vselect.ll
    llvm/test/CodeGen/AMDGPU/zero_extend.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
index 5f1afdd7f10c..a0c25faa8256 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -602,12 +602,6 @@ void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   }
 
   if (RC == &AMDGPU::SReg_64RegClass) {
-    if (SrcReg == AMDGPU::SCC) {
-      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
-          .addImm(1)
-          .addImm(0);
-      return;
-    }
     if (DestReg == AMDGPU::VCC) {
       if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
         BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
@@ -4094,20 +4088,20 @@ unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
   case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
   case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
   case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
-  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64;
-  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64;
-  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64;
-  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64;
-  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64;
-  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64;
-  case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64;
-  case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64;
-  case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64;
-  case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64;
-  case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64;
-  case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64;
-  case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64;
-  case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64;
+  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
+  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
+  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
+  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
+  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
+  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
+  case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
+  case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
+  case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
+  case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
+  case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
+  case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
+  case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
+  case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
   case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
   case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
   case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
@@ -4498,13 +4492,13 @@ void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
       continue;
     }
 
-    if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) &&
+    if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) &&
         !isOperandLegal(MI, Idx, &MO)) {
       legalizeOpWithMove(MI, Idx);
       continue;
     }
 
-    if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg())))
+    if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
       continue; // VGPRs are legal
 
     // We can use one SGPR in each VOP3 instruction prior to GFX10
@@ -5140,7 +5134,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
 
     unsigned Opcode = Inst.getOpcode();
     unsigned NewOpcode = getVALUOp(Inst);
-    Register CondReg = RI.getVCC();
+
     // Handle some special cases
     switch (Opcode) {
     default:
@@ -5259,19 +5253,19 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
       continue;
 
     case AMDGPU::S_CBRANCH_SCC0:
-    case AMDGPU::S_CBRANCH_SCC1: {
+    case AMDGPU::S_CBRANCH_SCC1:
       // Clear unused bits of vcc
-      Register CondReg = Inst.getOperand(1).getReg();
-      bool IsSCC = CondReg == AMDGPU::SCC;
-      Register VCC = RI.getVCC();
-      Register EXEC = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
-      unsigned Opc = ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
-      BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(Opc), VCC)
-          .addReg(EXEC)
-          .addReg(IsSCC ? VCC : CondReg);
-      Inst.RemoveOperand(1);
+      if (ST.isWave32())
+        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32),
+                AMDGPU::VCC_LO)
+            .addReg(AMDGPU::EXEC_LO)
+            .addReg(AMDGPU::VCC_LO);
+      else
+        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
+                AMDGPU::VCC)
+            .addReg(AMDGPU::EXEC)
+            .addReg(AMDGPU::VCC);
       break;
-    }
 
     case AMDGPU::S_BFE_U64:
     case AMDGPU::S_BFM_B64:
@@ -5372,33 +5366,6 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
       Inst.eraseFromParent();
     }
       continue;
-    case AMDGPU::S_CMP_EQ_I32:
-    case AMDGPU::S_CMP_LG_I32:
-    case AMDGPU::S_CMP_GT_I32:
-    case AMDGPU::S_CMP_GE_I32:
-    case AMDGPU::S_CMP_LT_I32:
-    case AMDGPU::S_CMP_LE_I32:
-    case AMDGPU::S_CMP_EQ_U32:
-    case AMDGPU::S_CMP_LG_U32:
-    case AMDGPU::S_CMP_GT_U32:
-    case AMDGPU::S_CMP_GE_U32:
-    case AMDGPU::S_CMP_LT_U32:
-    case AMDGPU::S_CMP_LE_U32:
-    case AMDGPU::S_CMP_EQ_U64:
-    case AMDGPU::S_CMP_LG_U64: {
-      const MCInstrDesc &NewDesc = get(NewOpcode);
-      CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
-      MachineInstr *NewInstr =
-          BuildMI(*MBB, Inst, Inst.getDebugLoc(), NewDesc, CondReg)
-              .add(Inst.getOperand(0))
-              .add(Inst.getOperand(1));
-      legalizeOperands(*NewInstr, MDT);
-      int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC);
-      MachineOperand SCCOp = Inst.getOperand(SCCIdx);
-      addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
-      Inst.eraseFromParent();
-      continue;
-    }
     }
 
     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
@@ -5420,7 +5387,7 @@ void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
       if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
         // Only propagate through live-def of SCC.
         if (Op.isDef() && !Op.isDead())
-          addSCCDefUsersToVALUWorklist(Op, Inst, Worklist, RI.getVCC());
+          addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
         Inst.RemoveOperand(i);
       }
     }
@@ -5834,9 +5801,9 @@ void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
     &AMDGPU::SGPR_32RegClass;
 
   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
-  const TargetRegisterClass *Src1RC =
-      Src1.isReg() ? RI.getRegClassForReg(MRI, Src1.getReg())
-                   : &AMDGPU::SGPR_32RegClass;
+  const TargetRegisterClass *Src1RC = Src1.isReg() ?
+    MRI.getRegClass(Src1.getReg()) :
+    &AMDGPU::SGPR_32RegClass;
 
   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
 
@@ -6119,8 +6086,7 @@ void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
 
 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                                MachineInstr &SCCDefInst,
-                                               SetVectorType &Worklist,
-                                               Register NewCond) const {
+                                               SetVectorType &Worklist) const {
   // Ensure that def inst defines SCC, which is still live.
   assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
          !Op.isDead() && Op.getParent() == &SCCDefInst);
@@ -6131,18 +6097,23 @@ void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
        make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
                   SCCDefInst.getParent()->end())) {
     // Check if SCC is used first.
-    int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI);
-    if (SCCIdx != -1) {
+    if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) {
       if (MI.isCopy()) {
         MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
         unsigned DestReg = MI.getOperand(0).getReg();
-        MRI.replaceRegWith(DestReg, NewCond);
+        SmallVector<MachineInstr *, 4> Users;
+        for (auto &User : MRI.use_nodbg_instructions(DestReg)) {
+          if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) ||
+              (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) {
+            Users.push_back(&User);
+            Worklist.insert(&User);
+          }
+        }
+        for (auto &U : Users)
+          U->getOperand(4).setReg(RI.getVCC());
         CopyToDelete.push_back(&MI);
-      } else {
-        if (NewCond.isValid())
-          MI.getOperand(SCCIdx).setReg(NewCond);
+      } else
         Worklist.insert(&MI);
-      }
     }
     // Exit if we find another SCC def.
     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)

diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.h b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
index 188248c965af..0f0e8420f9cf 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.h
@@ -124,8 +124,7 @@ class SIInstrInfo final : public AMDGPUGenInstrInfo {
 
   void addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                     MachineInstr &SCCDefInst,
-                                    SetVectorType &Worklist,
-                                    Register NewCond = Register()) const;
+                                    SetVectorType &Worklist) const;
 
   const TargetRegisterClass *
   getDestEquivalentVGPRClass(const MachineInstr &Inst) const;

diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index d88a2e34cce4..7aee52f91360 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -571,7 +571,15 @@ def atomic_store_local_64_m0 : PatFrag <
 def si_setcc_uniform : PatFrag <
   (ops node:$lhs, node:$rhs, node:$cond),
   (setcc node:$lhs, node:$rhs, node:$cond), [{
-  return !N->isDivergent();
+  for (SDNode *Use : N->uses()) {
+    if (Use->isMachineOpcode() || Use->getOpcode() != ISD::CopyToReg)
+      return false;
+
+    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
+    if (Reg != AMDGPU::SCC)
+      return false;
+  }
+  return true;
 }]>;
 
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll b/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
index 77a95c049b85..72e62f8dbbfd 100644
--- a/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
+++ b/llvm/test/CodeGen/AMDGPU/32-bit-local-address-space.ll
@@ -57,8 +57,8 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}null_32bit_lds_ptr:
-; SI: s_cmp_lg_u32
-; SI: s_cselect_b64 vcc, 1, 0
+; SI: v_cmp_ne_u32
+; SI-NOT: v_cmp_ne_u32
 ; SI: v_cndmask_b32
 define amdgpu_kernel void @null_32bit_lds_ptr(i32 addrspace(1)* %out, i32 addrspace(3)* %lds) nounwind {
   %cmp = icmp ne i32 addrspace(3)* %lds, null

diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
index e7d34eb03a04..d16edbac75fe 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -10,8 +10,7 @@
 ; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
 ; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
 ; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
-; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
-; CI-DAG: s_cselect_b64 vcc, 1, 0
+; CI-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
 ; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
 ; CI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
 ; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
@@ -23,8 +22,7 @@
 ; GFX9-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[SSRC_SHARED_BASE]]
 
 ; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_shared_base
-; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9: s_cselect_b64 vcc, 1, 0
+; GFX9: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
 ; GFX9: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
 ; GFX9-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
 ; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
@@ -78,8 +76,7 @@ define void @use_group_to_flat_addrspacecast_func(i32 addrspace(3)* %ptr) #0 {
 ; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
 
 ; CI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
-; CI-DAG: s_cselect_b64 vcc, 1, 0
+; CI-DAG: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
 ; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
 ; CI-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
 ; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
@@ -92,8 +89,7 @@ define void @use_group_to_flat_addrspacecast_func(i32 addrspace(3)* %ptr) #0 {
 ; GFX9-XXX: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], src_private_base
 
 ; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
-; GFX9: s_cmp_lg_u32 [[PTR]], -1
-; GFX9: s_cselect_b64 vcc, 1, 0
+; GFX9: v_cmp_ne_u32_e64 vcc, [[PTR]], -1
 ; GFX9: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
 ; GFX9: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[PTR]]
 ; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, [[VPTR]]
@@ -152,8 +148,7 @@ define amdgpu_kernel void @use_constant_to_global_addrspacecast(i32 addrspace(4)
 ; HSA: enable_sgpr_queue_ptr = 0
 
 ; HSA: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}
-; CI-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
-; GFX9-DAG: s_cmp_lg_u64 s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
+; HSA-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
 ; HSA-DAG: v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
 ; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
 ; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
@@ -170,8 +165,7 @@ define amdgpu_kernel void @use_flat_to_group_addrspacecast(i32* %ptr) #0 {
 ; HSA: enable_sgpr_queue_ptr = 0
 
 ; HSA: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}
-; CI-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
-; GFX9-DAG: s_cmp_lg_u64 s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
+; HSA-DAG: v_cmp_ne_u64_e64 vcc, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0{{$}}
 ; HSA-DAG: v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
 ; HSA-DAG: v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
 ; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}

diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.private-memory.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.private-memory.ll
index c5f9adeeccab..d38452a3a22a 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.private-memory.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.private-memory.ll
@@ -18,8 +18,7 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 
 ; GCN-ALLOCA: v_add_{{[iu]}}32_e32 [[RESULT:v[0-9]+]], vcc, v{{[0-9]+}}, v0
 
-; GCN-PROMOTE: s_cmp_eq_u32 [[IN]], 1
-; GCN-PROMOTE: s_cselect_b64 vcc, 1, 0
+; GCN-PROMOTE: v_cmp_eq_u32_e64 vcc, [[IN]], 1
 ; GCN-PROMOTE-NEXT: v_addc_u32_e32 [[RESULT:v[0-9]+]], vcc, 0, v0, vcc
 
 ; GCN: buffer_store_dword [[RESULT]]

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index c7da2923b94e..de7521c3d30d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -42,7 +42,7 @@ define amdgpu_kernel void @udiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[TMP35:%.*]] = sub i32 [[TMP28]], 1
 ; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP33]], i32 [[TMP34]], i32 [[TMP28]]
 ; CHECK-NEXT:    [[TMP37:%.*]] = select i1 [[TMP32]], i32 [[TMP36]], i32 [[TMP35]]
-; CHECK-NEXT:    store i32 [[TMP37]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[TMP37]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i32:
@@ -121,7 +121,7 @@ define amdgpu_kernel void @urem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[TMP35:%.*]] = add i32 [[TMP30]], [[Y]]
 ; CHECK-NEXT:    [[TMP36:%.*]] = select i1 [[TMP33]], i32 [[TMP34]], i32 [[TMP30]]
 ; CHECK-NEXT:    [[TMP37:%.*]] = select i1 [[TMP32]], i32 [[TMP36]], i32 [[TMP35]]
-; CHECK-NEXT:    store i32 [[TMP37]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[TMP37]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i32:
@@ -209,7 +209,7 @@ define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[TMP44:%.*]] = select i1 [[TMP39]], i32 [[TMP43]], i32 [[TMP42]]
 ; CHECK-NEXT:    [[TMP45:%.*]] = xor i32 [[TMP44]], [[TMP3]]
 ; CHECK-NEXT:    [[TMP46:%.*]] = sub i32 [[TMP45]], [[TMP3]]
-; CHECK-NEXT:    store i32 [[TMP46]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[TMP46]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i32:
@@ -221,17 +221,17 @@ define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i32 s8, s3, 31
 ; GCN-NEXT:    s_add_i32 s3, s3, s8
-; GCN-NEXT:    s_xor_b32 s3, s3, s8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s3
-; GCN-NEXT:    s_ashr_i32 s9, s2, 31
-; GCN-NEXT:    s_add_i32 s2, s2, s9
-; GCN-NEXT:    s_xor_b32 s2, s2, s9
+; GCN-NEXT:    s_xor_b32 s9, s3, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    s_xor_b32 s2, s2, s3
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    s_xor_b32 s8, s9, s8
+; GCN-NEXT:    s_xor_b32 s3, s3, s8
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s3
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s3
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s9
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s9
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
@@ -240,17 +240,17 @@ define amdgpu_kernel void @sdiv_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s3
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s9
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s2, v1
 ; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, s2, v1
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v4
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v4
 ; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, s3, v0
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s3, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = sdiv i32 %x, %y
@@ -305,7 +305,7 @@ define amdgpu_kernel void @srem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; CHECK-NEXT:    [[TMP43:%.*]] = select i1 [[TMP38]], i32 [[TMP42]], i32 [[TMP41]]
 ; CHECK-NEXT:    [[TMP44:%.*]] = xor i32 [[TMP43]], [[TMP1]]
 ; CHECK-NEXT:    [[TMP45:%.*]] = sub i32 [[TMP44]], [[TMP1]]
-; CHECK-NEXT:    store i32 [[TMP45]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[TMP45]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i32:
@@ -316,17 +316,17 @@ define amdgpu_kernel void @srem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i32 s2, s5, 31
 ; GCN-NEXT:    s_add_i32 s3, s5, s2
-; GCN-NEXT:    s_xor_b32 s8, s3, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
-; GCN-NEXT:    s_ashr_i32 s9, s4, 31
-; GCN-NEXT:    s_add_i32 s4, s4, s9
-; GCN-NEXT:    s_xor_b32 s10, s4, s9
+; GCN-NEXT:    s_xor_b32 s10, s3, s2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
+; GCN-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-NEXT:    s_add_i32 s4, s4, s8
+; GCN-NEXT:    s_xor_b32 s9, s4, s8
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s8
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s8
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s10
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s10
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[2:3]
@@ -334,18 +334,18 @@ define amdgpu_kernel void @srem_i32(i32 addrspace(1)* %out, i32 %x, i32 %y) {
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[2:3]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s10
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s8
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s10, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s10, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s8, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s9
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s10
+; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s9, v0
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s9, v0
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s10, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s10, v1
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s10, v1
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[2:3]
-; GCN-NEXT:    v_xor_b32_e32 v0, s9, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s9, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
@@ -373,7 +373,7 @@ define amdgpu_kernel void @udiv_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 65535
 ; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i16
-; CHECK-NEXT:    store i16 [[TMP17]], i16 addrspace(1)* [[OUT:%.*]], align 2
+; CHECK-NEXT:    store i16 [[TMP17]], i16 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i16:
@@ -422,7 +422,7 @@ define amdgpu_kernel void @urem_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
 ; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 65535
 ; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
-; CHECK-NEXT:    store i16 [[TMP19]], i16 addrspace(1)* [[OUT:%.*]], align 2
+; CHECK-NEXT:    store i16 [[TMP19]], i16 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i16:
@@ -475,7 +475,7 @@ define amdgpu_kernel void @sdiv_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
 ; CHECK-NEXT:    [[TMP19:%.*]] = shl i32 [[TMP18]], 16
 ; CHECK-NEXT:    [[TMP20:%.*]] = ashr i32 [[TMP19]], 16
 ; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16
-; CHECK-NEXT:    store i16 [[TMP21]], i16 addrspace(1)* [[OUT:%.*]], align 2
+; CHECK-NEXT:    store i16 [[TMP21]], i16 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i16:
@@ -533,7 +533,7 @@ define amdgpu_kernel void @srem_i16(i16 addrspace(1)* %out, i16 %x, i16 %y) {
 ; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 16
 ; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 16
 ; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i16
-; CHECK-NEXT:    store i16 [[TMP23]], i16 addrspace(1)* [[OUT:%.*]], align 2
+; CHECK-NEXT:    store i16 [[TMP23]], i16 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i16:
@@ -587,7 +587,7 @@ define amdgpu_kernel void @udiv_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 255
 ; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
-; CHECK-NEXT:    store i8 [[TMP17]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i8 [[TMP17]], i8 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i8:
@@ -634,7 +634,7 @@ define amdgpu_kernel void @urem_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
 ; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 255
 ; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i8
-; CHECK-NEXT:    store i8 [[TMP19]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i8 [[TMP19]], i8 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i8:
@@ -686,7 +686,7 @@ define amdgpu_kernel void @sdiv_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
 ; CHECK-NEXT:    [[TMP19:%.*]] = shl i32 [[TMP18]], 24
 ; CHECK-NEXT:    [[TMP20:%.*]] = ashr i32 [[TMP19]], 24
 ; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i8
-; CHECK-NEXT:    store i8 [[TMP21]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i8 [[TMP21]], i8 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i8:
@@ -744,7 +744,7 @@ define amdgpu_kernel void @srem_i8(i8 addrspace(1)* %out, i8 %x, i8 %y) {
 ; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 24
 ; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 24
 ; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i8
-; CHECK-NEXT:    store i8 [[TMP23]], i8 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i8 [[TMP23]], i8 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i8:
@@ -942,7 +942,7 @@ define amdgpu_kernel void @udiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; CHECK-NEXT:    [[TMP158:%.*]] = select i1 [[TMP155]], i32 [[TMP156]], i32 [[TMP150]]
 ; CHECK-NEXT:    [[TMP159:%.*]] = select i1 [[TMP154]], i32 [[TMP158]], i32 [[TMP157]]
 ; CHECK-NEXT:    [[TMP160:%.*]] = insertelement <4 x i32> [[TMP120]], i32 [[TMP159]], i64 3
-; CHECK-NEXT:    store <4 x i32> [[TMP160]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <4 x i32> [[TMP160]], <4 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v4i32:
@@ -1214,7 +1214,7 @@ define amdgpu_kernel void @urem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; CHECK-NEXT:    [[TMP158:%.*]] = select i1 [[TMP155]], i32 [[TMP156]], i32 [[TMP152]]
 ; CHECK-NEXT:    [[TMP159:%.*]] = select i1 [[TMP154]], i32 [[TMP158]], i32 [[TMP157]]
 ; CHECK-NEXT:    [[TMP160:%.*]] = insertelement <4 x i32> [[TMP120]], i32 [[TMP159]], i64 3
-; CHECK-NEXT:    store <4 x i32> [[TMP160]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <4 x i32> [[TMP160]], <4 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v4i32:
@@ -1522,55 +1522,55 @@ define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; CHECK-NEXT:    [[TMP194:%.*]] = xor i32 [[TMP193]], [[TMP152]]
 ; CHECK-NEXT:    [[TMP195:%.*]] = sub i32 [[TMP194]], [[TMP152]]
 ; CHECK-NEXT:    [[TMP196:%.*]] = insertelement <4 x i32> [[TMP147]], i32 [[TMP195]], i64 3
-; CHECK-NEXT:    store <4 x i32> [[TMP196]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <4 x i32> [[TMP196]], <4 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v4i32:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx8 s[12:19], s[0:1], 0xd
-; GCN-NEXT:    s_mov_b32 s20, 0x4f800000
 ; GCN-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b32 s11, 0xf000
 ; GCN-NEXT:    s_mov_b32 s10, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i32 s2, s16, 31
 ; GCN-NEXT:    s_add_i32 s3, s16, s2
-; GCN-NEXT:    s_xor_b32 s3, s3, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GCN-NEXT:    s_xor_b32 s5, s3, s2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
+; GCN-NEXT:    s_mov_b32 s16, 0x4f800000
 ; GCN-NEXT:    s_ashr_i32 s6, s17, 31
 ; GCN-NEXT:    s_add_i32 s0, s17, s6
-; GCN-NEXT:    s_xor_b32 s7, s0, s6
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s7
-; GCN-NEXT:    s_ashr_i32 s4, s12, 31
-; GCN-NEXT:    s_add_i32 s5, s12, s4
-; GCN-NEXT:    v_mul_f32_e32 v0, s20, v0
+; GCN-NEXT:    s_xor_b32 s17, s0, s6
+; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s17
+; GCN-NEXT:    s_ashr_i32 s3, s12, 31
+; GCN-NEXT:    v_mul_f32_e32 v0, s16, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    s_xor_b32 s5, s5, s4
-; GCN-NEXT:    s_xor_b32 s12, s4, s2
-; GCN-NEXT:    s_ashr_i32 s16, s13, 31
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s3
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s3
-; GCN-NEXT:    s_add_i32 s13, s13, s16
-; GCN-NEXT:    s_xor_b32 s13, s13, s16
+; GCN-NEXT:    s_add_i32 s4, s12, s3
+; GCN-NEXT:    s_xor_b32 s4, s4, s3
+; GCN-NEXT:    s_xor_b32 s7, s3, s2
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s5
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s5
+; GCN-NEXT:    s_ashr_i32 s12, s13, 31
+; GCN-NEXT:    s_add_i32 s13, s13, s12
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[0:1]
 ; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v3
+; GCN-NEXT:    s_xor_b32 s13, s13, s12
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v1, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s5
-; GCN-NEXT:    v_mul_f32_e32 v1, s20, v2
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s4
+; GCN-NEXT:    v_mul_f32_e32 v1, s16, v2
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s3
+; GCN-NEXT:    v_mul_lo_u32 v2, v0, s5
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, s7
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s5, v2
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s3, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s7
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s5, v2
+; GCN-NEXT:    v_mul_hi_u32 v5, v1, s17
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s4, v2
+; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s5, v4
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, s17
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s4, v2
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v5
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
 ; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 0, v4
@@ -1584,55 +1584,55 @@ define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; GCN-NEXT:    s_ashr_i32 s5, s18, 31
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
 ; GCN-NEXT:    s_add_i32 s0, s18, s5
-; GCN-NEXT:    s_xor_b32 s4, s16, s6
-; GCN-NEXT:    s_xor_b32 s6, s0, s5
-; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s6
+; GCN-NEXT:    s_xor_b32 s4, s12, s6
+; GCN-NEXT:    s_xor_b32 s12, s0, s5
+; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s12
 ; GCN-NEXT:    v_mul_hi_u32 v1, v1, s13
-; GCN-NEXT:    v_xor_b32_e32 v0, s12, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, s7, v0
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s7, v0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v4
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, s7
-; GCN-NEXT:    v_mul_f32_e32 v4, s20, v4
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, s17
+; GCN-NEXT:    s_ashr_i32 s6, s19, 31
+; GCN-NEXT:    v_mul_f32_e32 v4, s16, v4
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s13, v2
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s7, v3
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s17, v3
 ; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s13, v2
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v1
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[2:3]
-; GCN-NEXT:    v_mul_lo_u32 v2, v4, s6
-; GCN-NEXT:    v_mul_hi_u32 v3, v4, s6
-; GCN-NEXT:    s_ashr_i32 s7, s19, 31
+; GCN-NEXT:    v_mul_lo_u32 v2, v4, s12
+; GCN-NEXT:    v_mul_hi_u32 v3, v4, s12
 ; GCN-NEXT:    s_ashr_i32 s2, s14, 31
+; GCN-NEXT:    s_add_i32 s3, s14, s2
 ; GCN-NEXT:    v_sub_i32_e32 v5, vcc, 0, v2
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v5, s[0:1]
 ; GCN-NEXT:    v_mul_hi_u32 v2, v2, v4
-; GCN-NEXT:    s_add_i32 s3, s14, s2
 ; GCN-NEXT:    s_xor_b32 s3, s3, s2
 ; GCN-NEXT:    v_xor_b32_e32 v1, s4, v1
+; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s4, v1
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v2, v4
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GCN-NEXT:    s_add_i32 s0, s19, s7
-; GCN-NEXT:    s_xor_b32 s12, s0, s7
-; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s12
+; GCN-NEXT:    s_add_i32 s0, s19, s6
+; GCN-NEXT:    s_xor_b32 s14, s0, s6
+; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s14
 ; GCN-NEXT:    v_mul_hi_u32 v2, v2, s3
-; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s4, v1
-; GCN-NEXT:    s_xor_b32 s13, s2, s5
+; GCN-NEXT:    s_xor_b32 s7, s2, s5
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v4
-; GCN-NEXT:    v_mul_lo_u32 v3, v2, s6
-; GCN-NEXT:    v_mul_f32_e32 v4, s20, v4
+; GCN-NEXT:    v_mul_lo_u32 v3, v2, s12
+; GCN-NEXT:    v_mul_f32_e32 v4, s16, v4
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v4, v4
 ; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s3, v3
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s6, v5
-; GCN-NEXT:    s_ashr_i32 s6, s15, 31
-; GCN-NEXT:    v_mul_lo_u32 v6, v4, s12
-; GCN-NEXT:    v_mul_hi_u32 v7, v4, s12
-; GCN-NEXT:    s_add_i32 s14, s15, s6
-; GCN-NEXT:    s_xor_b32 s14, s14, s6
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s12, v5
+; GCN-NEXT:    s_ashr_i32 s12, s15, 31
+; GCN-NEXT:    v_mul_lo_u32 v6, v4, s14
+; GCN-NEXT:    v_mul_hi_u32 v7, v4, s14
+; GCN-NEXT:    s_add_i32 s13, s15, s12
+; GCN-NEXT:    s_xor_b32 s13, s13, s12
 ; GCN-NEXT:    v_sub_i32_e32 v8, vcc, 0, v6
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v7
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v8, s[4:5]
@@ -1643,17 +1643,17 @@ define amdgpu_kernel void @sdiv_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, v6, v4
 ; GCN-NEXT:    v_subrev_i32_e32 v4, vcc, v6, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v7, s[4:5]
-; GCN-NEXT:    v_mul_hi_u32 v4, v4, s14
+; GCN-NEXT:    v_mul_hi_u32 v4, v4, s13
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
-; GCN-NEXT:    v_mul_lo_u32 v3, v4, s12
-; GCN-NEXT:    v_xor_b32_e32 v2, s13, v2
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s13, v2
-; GCN-NEXT:    s_xor_b32 s4, s6, s7
-; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s14, v3
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s12, v5
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s14, v3
+; GCN-NEXT:    v_mul_lo_u32 v3, v4, s14
+; GCN-NEXT:    v_xor_b32_e32 v2, s7, v2
+; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s7, v2
+; GCN-NEXT:    s_xor_b32 s4, s12, s6
+; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s13, v3
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s14, v5
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s13, v3
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, -1, v4
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, 1, v4
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
@@ -1862,55 +1862,55 @@ define amdgpu_kernel void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; CHECK-NEXT:    [[TMP190:%.*]] = xor i32 [[TMP189]], [[TMP147]]
 ; CHECK-NEXT:    [[TMP191:%.*]] = sub i32 [[TMP190]], [[TMP147]]
 ; CHECK-NEXT:    [[TMP192:%.*]] = insertelement <4 x i32> [[TMP144]], i32 [[TMP191]], i64 3
-; CHECK-NEXT:    store <4 x i32> [[TMP192]], <4 x i32> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <4 x i32> [[TMP192]], <4 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v4i32:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx8 s[12:19], s[0:1], 0xd
-; GCN-NEXT:    s_mov_b32 s20, 0x4f800000
 ; GCN-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b32 s11, 0xf000
 ; GCN-NEXT:    s_mov_b32 s10, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i32 s2, s16, 31
 ; GCN-NEXT:    s_add_i32 s3, s16, s2
-; GCN-NEXT:    s_xor_b32 s4, s3, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-NEXT:    s_xor_b32 s5, s3, s2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
+; GCN-NEXT:    s_mov_b32 s16, 0x4f800000
 ; GCN-NEXT:    s_ashr_i32 s6, s12, 31
-; GCN-NEXT:    s_add_i32 s0, s12, s6
 ; GCN-NEXT:    s_ashr_i32 s2, s17, 31
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    s_add_i32 s0, s12, s6
 ; GCN-NEXT:    s_add_i32 s3, s17, s2
-; GCN-NEXT:    s_xor_b32 s5, s0, s6
-; GCN-NEXT:    s_xor_b32 s7, s3, s2
-; GCN-NEXT:    v_mul_f32_e32 v0, s20, v0
+; GCN-NEXT:    s_xor_b32 s4, s0, s6
+; GCN-NEXT:    v_mul_f32_e32 v0, s16, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    s_ashr_i32 s12, s13, 31
-; GCN-NEXT:    s_add_i32 s13, s13, s12
-; GCN-NEXT:    s_xor_b32 s13, s13, s12
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s4
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s4
+; GCN-NEXT:    s_xor_b32 s17, s3, s2
+; GCN-NEXT:    s_ashr_i32 s7, s13, 31
+; GCN-NEXT:    s_add_i32 s12, s13, s7
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s5
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s5
+; GCN-NEXT:    s_xor_b32 s12, s12, s7
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
 ; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s7
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s17
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v1, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s5
-; GCN-NEXT:    v_mul_f32_e32 v1, s20, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s4
+; GCN-NEXT:    v_mul_f32_e32 v1, s16, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s7
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, s7
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s5, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s5, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s4, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, s4, v2
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s4, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, s17
+; GCN-NEXT:    v_mul_hi_u32 v5, v1, s17
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s4, v0
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s4, v0
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s5, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s5, v2
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v2
 ; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 0, v4
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[4:5]
@@ -1920,63 +1920,63 @@ define amdgpu_kernel void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    s_ashr_i32 s0, s18, 31
 ; GCN-NEXT:    s_add_i32 s1, s18, s0
-; GCN-NEXT:    s_xor_b32 s16, s1, s0
+; GCN-NEXT:    s_xor_b32 s13, s1, s0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s16
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s13
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s[4:5]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s13
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s12
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[2:3]
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
 ; GCN-NEXT:    v_xor_b32_e32 v0, s6, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, v1, s7
+; GCN-NEXT:    v_mul_lo_u32 v1, v1, s17
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, s20, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, s16, v2
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s13, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s13, v1
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s7, v3
-; GCN-NEXT:    v_mul_lo_u32 v5, v2, s16
-; GCN-NEXT:    v_mul_hi_u32 v6, v2, s16
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, s7, v3
-; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s7, v3
+; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s12, v1
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s12, v1
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s17, v3
+; GCN-NEXT:    v_mul_lo_u32 v5, v2, s13
+; GCN-NEXT:    v_mul_hi_u32 v6, v2, s13
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, s17, v3
+; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s17, v3
 ; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 0, v5
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v6
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, v5, v7, s[4:5]
 ; GCN-NEXT:    v_mul_hi_u32 v5, v5, v2
 ; GCN-NEXT:    s_ashr_i32 s6, s14, 31
-; GCN-NEXT:    s_add_i32 s7, s14, s6
-; GCN-NEXT:    s_xor_b32 s7, s7, s6
+; GCN-NEXT:    s_add_i32 s12, s14, s6
+; GCN-NEXT:    s_xor_b32 s12, s12, s6
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v5, v2
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    s_ashr_i32 s0, s19, 31
 ; GCN-NEXT:    s_add_i32 s1, s19, s0
-; GCN-NEXT:    s_xor_b32 s13, s1, s0
+; GCN-NEXT:    s_xor_b32 s14, s1, s0
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
-; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s13
+; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s14
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s[4:5]
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, s7
+; GCN-NEXT:    v_mul_hi_u32 v2, v2, s12
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v4, v1, s[2:3]
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v3
-; GCN-NEXT:    v_xor_b32_e32 v1, s12, v1
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, s16
-; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s12, v1
-; GCN-NEXT:    v_mul_f32_e32 v3, s20, v3
+; GCN-NEXT:    v_xor_b32_e32 v1, s7, v1
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, s13
+; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s7, v1
+; GCN-NEXT:    v_mul_f32_e32 v3, s16, v3
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s7, v2
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s7, v2
 ; GCN-NEXT:    s_ashr_i32 s7, s15, 31
-; GCN-NEXT:    v_mul_lo_u32 v6, v3, s13
-; GCN-NEXT:    v_mul_hi_u32 v7, v3, s13
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s12, v2
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s12, v2
+; GCN-NEXT:    v_mul_lo_u32 v6, v3, s14
+; GCN-NEXT:    v_mul_hi_u32 v7, v3, s14
 ; GCN-NEXT:    s_add_i32 s12, s15, s7
 ; GCN-NEXT:    s_xor_b32 s12, s12, s7
 ; GCN-NEXT:    v_sub_i32_e32 v8, vcc, 0, v6
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v7
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v8, s[4:5]
 ; GCN-NEXT:    v_mul_hi_u32 v6, v6, v3
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s16, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, s16, v4
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s16, v4
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v4
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, s13, v4
+; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s13, v4
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, v6, v3
 ; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, v6, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v7, s[4:5]
@@ -1984,14 +1984,14 @@ define amdgpu_kernel void @srem_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %x
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[2:3]
-; GCN-NEXT:    v_mul_lo_u32 v3, v3, s13
+; GCN-NEXT:    v_mul_lo_u32 v3, v3, s14
 ; GCN-NEXT:    v_xor_b32_e32 v2, s6, v2
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s6, v2
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s12, v3
 ; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s12, v3
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, s13, v4
-; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s13, v4
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s14, v4
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, s14, v4
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s14, v4
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, v5, v3, s[2:3]
@@ -2086,7 +2086,7 @@ define amdgpu_kernel void @udiv_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; CHECK-NEXT:    [[TMP78:%.*]] = and i32 [[TMP77]], 65535
 ; CHECK-NEXT:    [[TMP79:%.*]] = trunc i32 [[TMP78]] to i16
 ; CHECK-NEXT:    [[TMP80:%.*]] = insertelement <4 x i16> [[TMP60]], i16 [[TMP79]], i64 3
-; CHECK-NEXT:    store <4 x i16> [[TMP80]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <4 x i16> [[TMP80]], <4 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v4i16:
@@ -2244,7 +2244,7 @@ define amdgpu_kernel void @urem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; CHECK-NEXT:    [[TMP86:%.*]] = and i32 [[TMP85]], 65535
 ; CHECK-NEXT:    [[TMP87:%.*]] = trunc i32 [[TMP86]] to i16
 ; CHECK-NEXT:    [[TMP88:%.*]] = insertelement <4 x i16> [[TMP66]], i16 [[TMP87]], i64 3
-; CHECK-NEXT:    store <4 x i16> [[TMP88]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <4 x i16> [[TMP88]], <4 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v4i16:
@@ -2418,7 +2418,7 @@ define amdgpu_kernel void @sdiv_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; CHECK-NEXT:    [[TMP94:%.*]] = ashr i32 [[TMP93]], 16
 ; CHECK-NEXT:    [[TMP95:%.*]] = trunc i32 [[TMP94]] to i16
 ; CHECK-NEXT:    [[TMP96:%.*]] = insertelement <4 x i16> [[TMP72]], i16 [[TMP95]], i64 3
-; CHECK-NEXT:    store <4 x i16> [[TMP96]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <4 x i16> [[TMP96]], <4 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v4i16:
@@ -2612,7 +2612,7 @@ define amdgpu_kernel void @srem_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %x
 ; CHECK-NEXT:    [[TMP102:%.*]] = ashr i32 [[TMP101]], 16
 ; CHECK-NEXT:    [[TMP103:%.*]] = trunc i32 [[TMP102]] to i16
 ; CHECK-NEXT:    [[TMP104:%.*]] = insertelement <4 x i16> [[TMP78]], i16 [[TMP103]], i64 3
-; CHECK-NEXT:    store <4 x i16> [[TMP104]], <4 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <4 x i16> [[TMP104]], <4 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v4i16:
@@ -2727,7 +2727,7 @@ define amdgpu_kernel void @udiv_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
 ; CHECK-NEXT:    [[TMP15:%.*]] = add i32 [[TMP10]], [[TMP14]]
 ; CHECK-NEXT:    [[TMP16:%.*]] = and i32 [[TMP15]], 7
 ; CHECK-NEXT:    [[TMP17:%.*]] = trunc i32 [[TMP16]] to i3
-; CHECK-NEXT:    store i3 [[TMP17]], i3 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i3 [[TMP17]], i3 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i3:
@@ -2777,7 +2777,7 @@ define amdgpu_kernel void @urem_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
 ; CHECK-NEXT:    [[TMP17:%.*]] = sub i32 [[TMP1]], [[TMP16]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = and i32 [[TMP17]], 7
 ; CHECK-NEXT:    [[TMP19:%.*]] = trunc i32 [[TMP18]] to i3
-; CHECK-NEXT:    store i3 [[TMP19]], i3 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i3 [[TMP19]], i3 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i3:
@@ -2832,7 +2832,7 @@ define amdgpu_kernel void @sdiv_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
 ; CHECK-NEXT:    [[TMP19:%.*]] = shl i32 [[TMP18]], 29
 ; CHECK-NEXT:    [[TMP20:%.*]] = ashr i32 [[TMP19]], 29
 ; CHECK-NEXT:    [[TMP21:%.*]] = trunc i32 [[TMP20]] to i3
-; CHECK-NEXT:    store i3 [[TMP21]], i3 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i3 [[TMP21]], i3 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i3:
@@ -2891,7 +2891,7 @@ define amdgpu_kernel void @srem_i3(i3 addrspace(1)* %out, i3 %x, i3 %y) {
 ; CHECK-NEXT:    [[TMP21:%.*]] = shl i32 [[TMP20]], 29
 ; CHECK-NEXT:    [[TMP22:%.*]] = ashr i32 [[TMP21]], 29
 ; CHECK-NEXT:    [[TMP23:%.*]] = trunc i32 [[TMP22]] to i3
-; CHECK-NEXT:    store i3 [[TMP23]], i3 addrspace(1)* [[OUT:%.*]], align 1
+; CHECK-NEXT:    store i3 [[TMP23]], i3 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i3:
@@ -2990,7 +2990,7 @@ define amdgpu_kernel void @udiv_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 65535
 ; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i16
 ; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <3 x i16> [[TMP40]], i16 [[TMP59]], i64 2
-; CHECK-NEXT:    store <3 x i16> [[TMP60]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i16> [[TMP60]], <3 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v3i16:
@@ -3114,7 +3114,7 @@ define amdgpu_kernel void @urem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 65535
 ; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i16
 ; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <3 x i16> [[TMP44]], i16 [[TMP65]], i64 2
-; CHECK-NEXT:    store <3 x i16> [[TMP66]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i16> [[TMP66]], <3 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v3i16:
@@ -3254,7 +3254,7 @@ define amdgpu_kernel void @sdiv_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; CHECK-NEXT:    [[TMP70:%.*]] = ashr i32 [[TMP69]], 16
 ; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i16
 ; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <3 x i16> [[TMP48]], i16 [[TMP71]], i64 2
-; CHECK-NEXT:    store <3 x i16> [[TMP72]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i16> [[TMP72]], <3 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v3i16:
@@ -3404,7 +3404,7 @@ define amdgpu_kernel void @srem_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %x
 ; CHECK-NEXT:    [[TMP76:%.*]] = ashr i32 [[TMP75]], 16
 ; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i16
 ; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <3 x i16> [[TMP52]], i16 [[TMP77]], i64 2
-; CHECK-NEXT:    store <3 x i16> [[TMP78]], <3 x i16> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i16> [[TMP78]], <3 x i16> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v3i16:
@@ -3545,7 +3545,7 @@ define amdgpu_kernel void @udiv_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 ; CHECK-NEXT:    [[TMP58:%.*]] = and i32 [[TMP57]], 32767
 ; CHECK-NEXT:    [[TMP59:%.*]] = trunc i32 [[TMP58]] to i15
 ; CHECK-NEXT:    [[TMP60:%.*]] = insertelement <3 x i15> [[TMP40]], i15 [[TMP59]], i64 2
-; CHECK-NEXT:    store <3 x i15> [[TMP60]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i15> [[TMP60]], <3 x i15> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v3i15:
@@ -3677,7 +3677,7 @@ define amdgpu_kernel void @urem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 ; CHECK-NEXT:    [[TMP64:%.*]] = and i32 [[TMP63]], 32767
 ; CHECK-NEXT:    [[TMP65:%.*]] = trunc i32 [[TMP64]] to i15
 ; CHECK-NEXT:    [[TMP66:%.*]] = insertelement <3 x i15> [[TMP44]], i15 [[TMP65]], i64 2
-; CHECK-NEXT:    store <3 x i15> [[TMP66]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i15> [[TMP66]], <3 x i15> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v3i15:
@@ -3823,7 +3823,7 @@ define amdgpu_kernel void @sdiv_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 ; CHECK-NEXT:    [[TMP70:%.*]] = ashr i32 [[TMP69]], 17
 ; CHECK-NEXT:    [[TMP71:%.*]] = trunc i32 [[TMP70]] to i15
 ; CHECK-NEXT:    [[TMP72:%.*]] = insertelement <3 x i15> [[TMP48]], i15 [[TMP71]], i64 2
-; CHECK-NEXT:    store <3 x i15> [[TMP72]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i15> [[TMP72]], <3 x i15> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v3i15:
@@ -3981,7 +3981,7 @@ define amdgpu_kernel void @srem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 ; CHECK-NEXT:    [[TMP76:%.*]] = ashr i32 [[TMP75]], 17
 ; CHECK-NEXT:    [[TMP77:%.*]] = trunc i32 [[TMP76]] to i15
 ; CHECK-NEXT:    [[TMP78:%.*]] = insertelement <3 x i15> [[TMP52]], i15 [[TMP77]], i64 2
-; CHECK-NEXT:    store <3 x i15> [[TMP78]], <3 x i15> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <3 x i15> [[TMP78]], <3 x i15> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v3i15:
@@ -4076,7 +4076,7 @@ define amdgpu_kernel void @srem_v3i15(<3 x i15> addrspace(1)* %out, <3 x i15> %x
 define amdgpu_kernel void @udiv_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @udiv_i32_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[X:%.*]], 1235195
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i32_oddk_denom:
@@ -4102,7 +4102,7 @@ define amdgpu_kernel void @udiv_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 define amdgpu_kernel void @udiv_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @udiv_i32_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[X:%.*]], 4096
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i32_pow2k_denom:
@@ -4125,7 +4125,7 @@ define amdgpu_kernel void @udiv_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; CHECK-LABEL: @udiv_i32_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = udiv i32 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i32_pow2_shl_denom:
@@ -4154,7 +4154,7 @@ define amdgpu_kernel void @udiv_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = udiv i32 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i32_pow2k_denom:
@@ -4183,7 +4183,7 @@ define amdgpu_kernel void @udiv_v2i32_mixed_pow2k_denom(<2 x i32> addrspace(1)*
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = udiv i32 [[TMP4]], 4095
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i32_mixed_pow2k_denom:
@@ -4291,7 +4291,7 @@ define amdgpu_kernel void @udiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP78:%.*]] = select i1 [[TMP75]], i32 [[TMP76]], i32 [[TMP70]]
 ; CHECK-NEXT:    [[TMP79:%.*]] = select i1 [[TMP74]], i32 [[TMP78]], i32 [[TMP77]]
 ; CHECK-NEXT:    [[TMP80:%.*]] = insertelement <2 x i32> [[TMP40]], i32 [[TMP79]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP80]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP80]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i32_pow2_shl_denom:
@@ -4364,7 +4364,7 @@ define amdgpu_kernel void @udiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 define amdgpu_kernel void @urem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @urem_i32_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = urem i32 [[X:%.*]], 1235195
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i32_oddk_denom:
@@ -4392,7 +4392,7 @@ define amdgpu_kernel void @urem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 define amdgpu_kernel void @urem_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @urem_i32_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = urem i32 [[X:%.*]], 4096
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i32_pow2k_denom:
@@ -4415,7 +4415,7 @@ define amdgpu_kernel void @urem_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; CHECK-LABEL: @urem_i32_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = urem i32 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i32_pow2_shl_denom:
@@ -4445,7 +4445,7 @@ define amdgpu_kernel void @urem_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = urem i32 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v2i32_pow2k_denom:
@@ -4550,7 +4550,7 @@ define amdgpu_kernel void @urem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP78:%.*]] = select i1 [[TMP75]], i32 [[TMP76]], i32 [[TMP72]]
 ; CHECK-NEXT:    [[TMP79:%.*]] = select i1 [[TMP74]], i32 [[TMP78]], i32 [[TMP77]]
 ; CHECK-NEXT:    [[TMP80:%.*]] = insertelement <2 x i32> [[TMP40]], i32 [[TMP79]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP80]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP80]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v2i32_pow2_shl_denom:
@@ -4623,7 +4623,7 @@ define amdgpu_kernel void @urem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 define amdgpu_kernel void @sdiv_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @sdiv_i32_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = sdiv i32 [[X:%.*]], 1235195
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i32_oddk_denom:
@@ -4649,7 +4649,7 @@ define amdgpu_kernel void @sdiv_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 define amdgpu_kernel void @sdiv_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @sdiv_i32_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = sdiv i32 [[X:%.*]], 4096
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i32_pow2k_denom:
@@ -4675,7 +4675,7 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; CHECK-LABEL: @sdiv_i32_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = sdiv i32 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i32_pow2_shl_denom:
@@ -4688,17 +4688,17 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; GCN-NEXT:    s_lshl_b32 s3, 0x1000, s3
 ; GCN-NEXT:    s_ashr_i32 s8, s3, 31
 ; GCN-NEXT:    s_add_i32 s3, s3, s8
-; GCN-NEXT:    s_xor_b32 s3, s3, s8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s3
-; GCN-NEXT:    s_ashr_i32 s9, s2, 31
-; GCN-NEXT:    s_add_i32 s2, s2, s9
-; GCN-NEXT:    s_xor_b32 s2, s2, s9
+; GCN-NEXT:    s_xor_b32 s9, s3, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    s_xor_b32 s2, s2, s3
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    s_xor_b32 s8, s9, s8
+; GCN-NEXT:    s_xor_b32 s3, s3, s8
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s3
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s3
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s9
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s9
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
@@ -4707,17 +4707,17 @@ define amdgpu_kernel void @sdiv_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s3
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s9
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s2, v1
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, s2, v1
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v4
-; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s2, v1
+; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s2, v1
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
+; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
+; GCN-NEXT:    v_xor_b32_e32 v0, s3, v0
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s3, v0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %shl.y = shl i32 4096, %y
@@ -4734,7 +4734,7 @@ define amdgpu_kernel void @sdiv_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i32 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v2i32_pow2k_denom:
@@ -4769,7 +4769,7 @@ define amdgpu_kernel void @ssdiv_v2i32_mixed_pow2k_denom(<2 x i32> addrspace(1)*
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i32 [[TMP4]], 4095
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: ssdiv_v2i32_mixed_pow2k_denom:
@@ -4898,7 +4898,7 @@ define amdgpu_kernel void @sdiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP96:%.*]] = xor i32 [[TMP95]], [[TMP54]]
 ; CHECK-NEXT:    [[TMP97:%.*]] = sub i32 [[TMP96]], [[TMP54]]
 ; CHECK-NEXT:    [[TMP98:%.*]] = insertelement <2 x i32> [[TMP49]], i32 [[TMP97]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP98]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP98]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v2i32_pow2_shl_denom:
@@ -4913,48 +4913,48 @@ define amdgpu_kernel void @sdiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; GCN-NEXT:    s_lshl_b32 s2, s4, s2
 ; GCN-NEXT:    s_ashr_i32 s5, s2, 31
 ; GCN-NEXT:    s_add_i32 s2, s2, s5
-; GCN-NEXT:    s_xor_b32 s2, s2, s5
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    s_xor_b32 s13, s2, s5
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s13
+; GCN-NEXT:    s_ashr_i32 s2, s6, 31
 ; GCN-NEXT:    s_lshl_b32 s0, s4, s3
-; GCN-NEXT:    s_ashr_i32 s3, s6, 31
-; GCN-NEXT:    s_add_i32 s1, s6, s3
+; GCN-NEXT:    s_add_i32 s1, s6, s2
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    s_ashr_i32 s6, s0, 31
-; GCN-NEXT:    s_add_i32 s10, s0, s6
-; GCN-NEXT:    s_xor_b32 s4, s1, s3
+; GCN-NEXT:    s_add_i32 s4, s0, s6
+; GCN-NEXT:    s_xor_b32 s3, s1, s2
 ; GCN-NEXT:    v_mul_f32_e32 v0, s14, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    s_xor_b32 s12, s10, s6
-; GCN-NEXT:    s_xor_b32 s13, s3, s5
+; GCN-NEXT:    s_xor_b32 s15, s4, s6
+; GCN-NEXT:    s_xor_b32 s12, s2, s5
 ; GCN-NEXT:    s_mov_b32 s10, -1
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s2
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s2
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s13
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s13
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
 ; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s12
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s15
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v1, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s[0:1]
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v2
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s4
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
 ; GCN-NEXT:    v_mul_f32_e32 v1, s14, v1
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
+; GCN-NEXT:    v_mul_lo_u32 v2, v0, s13
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    s_ashr_i32 s14, s7, 31
-; GCN-NEXT:    s_add_i32 s7, s7, s14
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s4, v2
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s12
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, s12
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s4, v2
-; GCN-NEXT:    s_xor_b32 s7, s7, s14
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s3, v2
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v4
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, s15
+; GCN-NEXT:    v_mul_hi_u32 v5, v1, s15
+; GCN-NEXT:    s_ashr_i32 s13, s7, 31
+; GCN-NEXT:    s_add_i32 s7, s7, s13
 ; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 0, v4
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[4:5]
 ; GCN-NEXT:    v_mul_hi_u32 v4, v4, v1
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
+; GCN-NEXT:    s_xor_b32 s7, s7, s13
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s3, v2
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v4, v1
 ; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, v4, v1
@@ -4963,12 +4963,12 @@ define amdgpu_kernel void @sdiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[2:3]
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, s12
-; GCN-NEXT:    v_xor_b32_e32 v0, s13, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s13, v0
-; GCN-NEXT:    s_xor_b32 s4, s14, s6
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, s15
+; GCN-NEXT:    v_xor_b32_e32 v0, s12, v0
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
+; GCN-NEXT:    s_xor_b32 s4, s13, s6
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s7, v2
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s12, v3
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s15, v3
 ; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s7, v2
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v1
@@ -4988,7 +4988,7 @@ define amdgpu_kernel void @sdiv_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 define amdgpu_kernel void @srem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @srem_i32_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = srem i32 [[X:%.*]], 1235195
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i32_oddk_denom:
@@ -5016,7 +5016,7 @@ define amdgpu_kernel void @srem_i32_oddk_denom(i32 addrspace(1)* %out, i32 %x) {
 define amdgpu_kernel void @srem_i32_pow2k_denom(i32 addrspace(1)* %out, i32 %x) {
 ; CHECK-LABEL: @srem_i32_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = srem i32 [[X:%.*]], 4096
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i32_pow2k_denom:
@@ -5043,7 +5043,7 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; CHECK-LABEL: @srem_i32_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i32 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = srem i32 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i32 [[R]], i32 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i32_pow2_shl_denom:
@@ -5055,17 +5055,17 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; GCN-NEXT:    s_lshl_b32 s2, 0x1000, s5
 ; GCN-NEXT:    s_ashr_i32 s3, s2, 31
 ; GCN-NEXT:    s_add_i32 s2, s2, s3
-; GCN-NEXT:    s_xor_b32 s8, s2, s3
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
-; GCN-NEXT:    s_ashr_i32 s9, s4, 31
-; GCN-NEXT:    s_add_i32 s4, s4, s9
-; GCN-NEXT:    s_xor_b32 s10, s4, s9
+; GCN-NEXT:    s_xor_b32 s10, s2, s3
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
+; GCN-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-NEXT:    s_add_i32 s4, s4, s8
+; GCN-NEXT:    s_xor_b32 s9, s4, s8
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s8
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s8
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s10
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s10
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[2:3]
@@ -5073,18 +5073,18 @@ define amdgpu_kernel void @srem_i32_pow2_shl_denom(i32 addrspace(1)* %out, i32 %
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[2:3]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s10
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s8
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s10, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s10, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s8, v1
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s9
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s10
+; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s9, v0
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s9, v0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, s10, v1
+; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s10, v1
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s10, v1
 ; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
-; GCN-NEXT:    v_xor_b32_e32 v0, s9, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s9, v0
+; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
@@ -5102,7 +5102,7 @@ define amdgpu_kernel void @srem_v2i32_pow2k_denom(<2 x i32> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i32> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = srem i32 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i32> [[TMP3]], i32 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP6]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v2i32_pow2k_denom:
@@ -5231,7 +5231,7 @@ define amdgpu_kernel void @srem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP94:%.*]] = xor i32 [[TMP93]], [[TMP51]]
 ; CHECK-NEXT:    [[TMP95:%.*]] = sub i32 [[TMP94]], [[TMP51]]
 ; CHECK-NEXT:    [[TMP96:%.*]] = insertelement <2 x i32> [[TMP48]], i32 [[TMP95]], i64 1
-; CHECK-NEXT:    store <2 x i32> [[TMP96]], <2 x i32> addrspace(1)* [[OUT:%.*]], align 8
+; CHECK-NEXT:    store <2 x i32> [[TMP96]], <2 x i32> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v2i32_pow2_shl_denom:
@@ -5240,54 +5240,54 @@ define amdgpu_kernel void @srem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; GCN-NEXT:    s_movk_i32 s4, 0x1000
 ; GCN-NEXT:    s_mov_b32 s14, 0x4f800000
 ; GCN-NEXT:    s_load_dwordx2 s[6:7], s[0:1], 0xb
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
+; GCN-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_lshl_b32 s2, s4, s2
 ; GCN-NEXT:    s_ashr_i32 s5, s2, 31
 ; GCN-NEXT:    s_add_i32 s2, s2, s5
-; GCN-NEXT:    s_xor_b32 s5, s2, s5
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
+; GCN-NEXT:    s_xor_b32 s13, s2, s5
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s13
 ; GCN-NEXT:    s_lshl_b32 s2, s4, s3
 ; GCN-NEXT:    s_ashr_i32 s12, s6, 31
 ; GCN-NEXT:    s_add_i32 s3, s6, s12
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    s_ashr_i32 s4, s2, 31
-; GCN-NEXT:    s_add_i32 s8, s2, s4
-; GCN-NEXT:    s_xor_b32 s6, s3, s12
+; GCN-NEXT:    s_add_i32 s6, s2, s4
+; GCN-NEXT:    s_xor_b32 s5, s3, s12
 ; GCN-NEXT:    v_mul_f32_e32 v0, s14, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    s_xor_b32 s13, s8, s4
-; GCN-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
-; GCN-NEXT:    s_mov_b32 s10, -1
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s5
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s5
+; GCN-NEXT:    s_xor_b32 s15, s6, s4
+; GCN-NEXT:    s_ashr_i32 s6, s7, 31
+; GCN-NEXT:    s_add_i32 s7, s7, s6
+; GCN-NEXT:    v_mul_lo_u32 v1, v0, s13
+; GCN-NEXT:    v_mul_hi_u32 v2, v0, s13
+; GCN-NEXT:    s_xor_b32 s7, s7, s6
+; GCN-NEXT:    s_mov_b32 s11, 0xf000
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[2:3]
 ; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s13
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s15
+; GCN-NEXT:    s_mov_b32 s10, -1
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v1, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v3, s[2:3]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s6
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s5
 ; GCN-NEXT:    v_mul_f32_e32 v1, s14, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s13
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, s13
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s6, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s6, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s5, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, s5, v2
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s13
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, s15
+; GCN-NEXT:    v_mul_hi_u32 v5, v1, s15
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s5, v0
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s5, v0
 ; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 0, v4
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[4:5]
 ; GCN-NEXT:    v_mul_hi_u32 v4, v4, v1
-; GCN-NEXT:    s_ashr_i32 s6, s7, 31
-; GCN-NEXT:    s_add_i32 s7, s7, s6
-; GCN-NEXT:    s_xor_b32 s7, s7, s6
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s13, v2
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s13, v2
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v4, v1
 ; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, v4, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s[4:5]
@@ -5295,20 +5295,19 @@ define amdgpu_kernel void @srem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[2:3]
-; GCN-NEXT:    v_mul_lo_u32 v1, v1, s13
+; GCN-NEXT:    v_mul_lo_u32 v1, v1, s15
 ; GCN-NEXT:    v_xor_b32_e32 v0, s12, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s7, v1
 ; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], s7, v1
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, s13, v2
-; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s13, v2
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s15, v2
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, s15, v2
+; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s15, v2
 ; GCN-NEXT:    s_and_b64 vcc, s[0:1], s[2:3]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[2:3]
 ; GCN-NEXT:    v_xor_b32_e32 v1, s6, v1
 ; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, s6, v1
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
 ; GCN-NEXT:    s_endpgm
   %shl.y = shl <2 x i32> <i32 4096, i32 4096>, %y
@@ -5320,7 +5319,7 @@ define amdgpu_kernel void @srem_v2i32_pow2_shl_denom(<2 x i32> addrspace(1)* %ou
 define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @udiv_i64_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = udiv i64 [[X:%.*]], 1235195949943
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i64_oddk_denom:
@@ -5354,7 +5353,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v6, v0, v3
 ; GCN-NEXT:    v_mul_hi_u32 v9, v1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    s_mov_b32 s4, 0x976a7376
+; GCN-NEXT:    s_movk_i32 s4, 0x11e
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
 ; GCN-NEXT:    v_mul_lo_u32 v6, v1, v3
 ; GCN-NEXT:    v_mul_hi_u32 v3, v1, v3
@@ -5370,7 +5369,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v5, v0, s3
 ; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
 ; GCN-NEXT:    v_mul_lo_u32 v6, v2, s3
-; GCN-NEXT:    s_movk_i32 s2, 0x11f
+; GCN-NEXT:    s_mov_b32 s2, 0x976a7377
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_mul_lo_u32 v5, v0, s3
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
@@ -5378,7 +5377,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v10, v0, v4
 ; GCN-NEXT:    v_mul_hi_u32 v9, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v11, v2, v4
-; GCN-NEXT:    s_mov_b32 s3, 0x976a7377
+; GCN-NEXT:    s_movk_i32 s3, 0x11f
 ; GCN-NEXT:    s_mov_b32 s9, s5
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
 ; GCN-NEXT:    v_addc_u32_e32 v9, vcc, v8, v10, vcc
@@ -5408,24 +5407,24 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s3
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s3
-; GCN-NEXT:    v_mov_b32_e32 v5, s2
+; GCN-NEXT:    v_mul_lo_u32 v2, v0, s3
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s2
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v5, s3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, v0, s3
+; GCN-NEXT:    v_mul_lo_u32 v3, v0, s2
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s7, v2
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s6, v3
 ; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s3, v3
+; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s2, v3
 ; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    s_movk_i32 s3, 0x11e
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s3, v4
+; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s4, v4
+; GCN-NEXT:    s_mov_b32 s2, 0x976a7376
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s4, v5
+; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s2, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, v4
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
 ; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
 ; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
@@ -5435,11 +5434,11 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
 ; GCN-NEXT:    v_mov_b32_e32 v6, s7
 ; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s3, v2
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v3
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s2, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s2, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v2
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
@@ -5455,7 +5454,7 @@ define amdgpu_kernel void @udiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 define amdgpu_kernel void @udiv_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @udiv_i64_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = udiv i64 [[X:%.*]], 4096
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i64_pow2k_denom:
@@ -5480,7 +5479,7 @@ define amdgpu_kernel void @udiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; CHECK-LABEL: @udiv_i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = udiv i64 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_i64_pow2_shl_denom:
@@ -5512,7 +5511,7 @@ define amdgpu_kernel void @udiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = udiv i64 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i64_pow2k_denom:
@@ -5543,7 +5542,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = udiv i64 [[TMP4]], 4095
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i64_mixed_pow2k_denom:
@@ -5551,9 +5550,9 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0x4f800000
 ; GCN-NEXT:    v_madak_f32 v0, 0, v0, 0x457ff000
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_movk_i32 s2, 0xf001
-; GCN-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-NEXT:    s_movk_i32 s6, 0xf001
 ; GCN-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
@@ -5562,96 +5561,96 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
+; GCN-NEXT:    s_movk_i32 s0, 0xfff
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s6
+; GCN-NEXT:    v_mul_lo_u32 v5, v1, s6
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, s6
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    v_mul_hi_u32 v2, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, s2
-; GCN-NEXT:    v_mul_lo_u32 v4, v0, s2
-; GCN-NEXT:    s_mov_b32 s6, -1
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, v0, v2
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, v0, v3
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
 ; GCN-NEXT:    v_mul_hi_u32 v6, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v9, v1, v2
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v3
+; GCN-NEXT:    v_mul_hi_u32 v8, v0, v3
+; GCN-NEXT:    v_mul_hi_u32 v9, v1, v3
+; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, v1, v4
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v7, v8, vcc
+; GCN-NEXT:    v_mul_lo_u32 v8, v1, v4
 ; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v3, v4, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
-; GCN-NEXT:    v_mul_hi_u32 v4, v0, s2
-; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v5, v2, s2
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, s2
-; GCN-NEXT:    v_subrev_i32_e32 v4, vcc, v0, v4
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshr_b64 s[2:3], s[8:9], 12
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v9, v0, v6
-; GCN-NEXT:    v_mul_hi_u32 v10, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v11, v2, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
-; GCN-NEXT:    v_addc_u32_e32 v9, vcc, v8, v10, vcc
-; GCN-NEXT:    v_mul_lo_u32 v10, v2, v6
-; GCN-NEXT:    v_mul_hi_u32 v6, v2, v6
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, v4
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v10
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v11, v7, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v4, s[0:1]
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v6, v4, vcc
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v2, vcc
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_add_i32_e64 v0, s[2:3], v0, v3
+; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
+; GCN-NEXT:    v_mul_hi_u32 v5, v0, s6
+; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v1, v4, s[2:3]
+; GCN-NEXT:    v_mul_lo_u32 v6, v3, s6
+; GCN-NEXT:    v_mul_lo_u32 v8, v0, s6
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, v0, v5
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
+; GCN-NEXT:    v_mul_lo_u32 v6, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v9, v0, v8
+; GCN-NEXT:    v_mul_hi_u32 v10, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v11, v3, v5
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
+; GCN-NEXT:    v_addc_u32_e32 v9, vcc, v7, v10, vcc
+; GCN-NEXT:    v_mul_lo_u32 v10, v3, v8
+; GCN-NEXT:    v_mul_hi_u32 v8, v3, v8
+; GCN-NEXT:    v_mul_lo_u32 v3, v3, v5
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v10
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v8, vcc
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v11, v2, vcc
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[2:3]
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s11, v1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    v_mul_lo_u32 v3, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v4, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v6, s11, v1
 ; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v8, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s11, v0
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    s_movk_i32 s0, 0xfff
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[8:9], 12
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v6, v2, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s0
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, s0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, v0, s0
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_mov_b32_e32 v5, s11
-; GCN-NEXT:    v_sub_i32_e32 v8, vcc, s10, v8
-; GCN-NEXT:    v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s0, v8
-; GCN-NEXT:    v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, s0
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s0
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, s0
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s10, v4
+; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s0, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
 ; GCN-NEXT:    s_movk_i32 s0, 0xffe
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v9
-; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v8
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, v5, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v3, -1, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v4
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, v1, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v6, v2, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v7, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
 ; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v1, s3
@@ -5673,7 +5672,7 @@ define amdgpu_kernel void @udiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
 ; CHECK-NEXT:    [[TMP7:%.*]] = udiv i64 [[TMP5]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i64_pow2_shl_denom:
@@ -5703,7 +5702,7 @@ define amdgpu_kernel void @udiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @urem_i64_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = urem i64 [[X:%.*]], 1235195393993
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i64_oddk_denom:
@@ -5726,9 +5725,8 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
 ; GCN-NEXT:    v_mul_hi_u32 v3, v0, s3
 ; GCN-NEXT:    v_mul_lo_u32 v4, v1, s3
-; GCN-NEXT:    s_mov_b32 s12, 0x9761f7c9
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
+; GCN-NEXT:    s_movk_i32 s12, 0x11f
+; GCN-NEXT:    s_mov_b32 s13, 0x9761f7c9
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_mul_lo_u32 v3, v0, s3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
@@ -5737,12 +5735,13 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v6, v0, v3
 ; GCN-NEXT:    v_mul_hi_u32 v9, v1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    s_movk_i32 s4, 0x11f
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s9, s5
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
 ; GCN-NEXT:    v_mul_lo_u32 v6, v1, v3
 ; GCN-NEXT:    v_mul_hi_u32 v3, v1, v3
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v8, v4, vcc
-; GCN-NEXT:    s_mov_b32 s9, s5
+; GCN-NEXT:    s_movk_i32 s5, 0x11e
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v4, v3, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v7, vcc
@@ -5753,7 +5752,7 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v5, v0, s3
 ; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
 ; GCN-NEXT:    v_mul_lo_u32 v6, v2, s3
-; GCN-NEXT:    s_movk_i32 s5, 0x11e
+; GCN-NEXT:    s_mov_b32 s8, s4
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_mul_lo_u32 v5, v0, s3
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
@@ -5761,13 +5760,14 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v10, v0, v4
 ; GCN-NEXT:    v_mul_hi_u32 v9, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v11, v2, v4
+; GCN-NEXT:    s_mov_b32 s4, 0x9761f7c8
 ; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
 ; GCN-NEXT:    v_addc_u32_e32 v9, vcc, v8, v10, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v10, v2, v5
 ; GCN-NEXT:    v_mul_hi_u32 v5, v2, v5
 ; GCN-NEXT:    v_mul_lo_u32 v2, v2, v4
+; GCN-NEXT:    s_mov_b32 s10, -1
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v10
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v5, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v11, v7, vcc
@@ -5791,26 +5791,25 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s4
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s12
-; GCN-NEXT:    v_mul_lo_u32 v1, v1, s12
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s12
+; GCN-NEXT:    v_mul_lo_u32 v2, v0, s12
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s13
+; GCN-NEXT:    v_mul_lo_u32 v1, v1, s13
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s13
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s7, v1
+; GCN-NEXT:    v_mov_b32_e32 v3, s12
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-NEXT:    v_mov_b32_e32 v3, s4
 ; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
+; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s13, v0
 ; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
 ; GCN-NEXT:    v_cmp_lt_u32_e64 s[2:3], s5, v5
-; GCN-NEXT:    s_mov_b32 s6, 0x9761f7c8
 ; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[2:3], s6, v4
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
+; GCN-NEXT:    v_cmp_lt_u32_e64 s[2:3], s4, v4
+; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s13, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s4, v5
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s12, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
 ; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
@@ -5819,9 +5818,9 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v5, v1, vcc
 ; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s5, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s6, v0
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s4, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s4, v1
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s12, v1
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
@@ -5837,7 +5836,7 @@ define amdgpu_kernel void @urem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 define amdgpu_kernel void @urem_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @urem_i64_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = urem i64 [[X:%.*]], 4096
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i64_pow2k_denom:
@@ -5862,7 +5861,7 @@ define amdgpu_kernel void @urem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; CHECK-LABEL: @urem_i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = urem i64 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_i64_pow2_shl_denom:
@@ -5898,7 +5897,7 @@ define amdgpu_kernel void @urem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = urem i64 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v2i64_pow2k_denom:
@@ -5933,7 +5932,7 @@ define amdgpu_kernel void @urem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
 ; CHECK-NEXT:    [[TMP7:%.*]] = urem i64 [[TMP5]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v2i64_pow2_shl_denom:
@@ -5969,7 +5968,7 @@ define amdgpu_kernel void @urem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @sdiv_i64_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = sdiv i64 [[X:%.*]], 1235195
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i64_oddk_denom:
@@ -6056,33 +6055,33 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s3
-; GCN-NEXT:    v_mul_hi_u32 v5, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, v0, s3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_sub_i32_e32 v8, vcc, s0, v8
-; GCN-NEXT:    v_mov_b32_e32 v5, s1
-; GCN-NEXT:    v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s3, v8
-; GCN-NEXT:    v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, s3
+; GCN-NEXT:    v_mul_hi_u32 v3, s3, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, s3
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s0, v4
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s3, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
 ; GCN-NEXT:    s_mov_b32 s0, 0x12d8fa
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v9
-; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v8
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, v5, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v3, -1, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v4
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v7, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
 ; GCN-NEXT:    v_xor_b32_e32 v0, s2, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s2, v1
@@ -6099,7 +6098,7 @@ define amdgpu_kernel void @sdiv_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 define amdgpu_kernel void @sdiv_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @sdiv_i64_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = sdiv i64 [[X:%.*]], 4096
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i64_pow2k_denom:
@@ -6128,7 +6127,7 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; CHECK-LABEL: @sdiv_i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = sdiv i64 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_i64_pow2_shl_denom:
@@ -6285,7 +6284,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i64 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v2i64_pow2k_denom:
@@ -6324,7 +6323,7 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i64 [[TMP4]], 4095
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: ssdiv_v2i64_mixed_pow2k_denom:
@@ -6416,33 +6415,33 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, s9
-; GCN-NEXT:    v_mul_hi_u32 v5, s9, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, v0, s9
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_sub_i32_e32 v8, vcc, s0, v8
-; GCN-NEXT:    v_mov_b32_e32 v5, s1
-; GCN-NEXT:    v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, s9, v8
-; GCN-NEXT:    v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, s9
+; GCN-NEXT:    v_mul_hi_u32 v3, s9, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, s9
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s0, v4
+; GCN-NEXT:    v_mov_b32_e32 v3, s1
+; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s9, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
 ; GCN-NEXT:    s_movk_i32 s0, 0xffe
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v9
-; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v8
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, v5, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v3, -1, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], s0, v4
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v7, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
 ; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s8, v0
@@ -6469,7 +6468,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
 ; CHECK-NEXT:    [[TMP7:%.*]] = sdiv i64 [[TMP5]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v2i64_pow2_shl_denom:
@@ -6750,7 +6749,7 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @srem_i64_oddk_denom(
 ; CHECK-NEXT:    [[R:%.*]] = srem i64 [[X:%.*]], 1235195
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i64_oddk_denom:
@@ -6878,7 +6877,7 @@ define amdgpu_kernel void @srem_i64_oddk_denom(i64 addrspace(1)* %out, i64 %x) {
 define amdgpu_kernel void @srem_i64_pow2k_denom(i64 addrspace(1)* %out, i64 %x) {
 ; CHECK-LABEL: @srem_i64_pow2k_denom(
 ; CHECK-NEXT:    [[R:%.*]] = srem i64 [[X:%.*]], 4096
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i64_pow2k_denom:
@@ -6909,7 +6908,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 ; CHECK-LABEL: @srem_i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl i64 4096, [[Y:%.*]]
 ; CHECK-NEXT:    [[R:%.*]] = srem i64 [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]], align 4
+; CHECK-NEXT:    store i64 [[R]], i64 addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_i64_pow2_shl_denom:
@@ -7064,7 +7063,7 @@ define amdgpu_kernel void @srem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
 ; CHECK-NEXT:    [[TMP5:%.*]] = srem i64 [[TMP4]], 4096
 ; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v2i64_pow2k_denom:
@@ -7111,7 +7110,7 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
 ; CHECK-NEXT:    [[TMP7:%.*]] = srem i64 [[TMP5]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
-; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]], align 16
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v2i64_pow2_shl_denom:

diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
index 5f72a5666395..8c6b94da79cf 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -310,16 +310,12 @@ loop:
 ; GCN-LABEL: {{^}}expand_requires_expand:
 ; GCN-NEXT: ; %bb.0: ; %bb0
 ; GCN: s_load_dword
-; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_cmp_lt_i32 s0, 0
-; GCN-NEXT: s_cselect_b64 s[0:1], 1, 0
+; GCN: {{s|v}}_cmp_lt_i32
 ; GCN: s_cbranch
 
 ; GCN: s_load_dword
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_cmp_lg_u32 s0, 3
-; GCN-NEXT: s_cselect_b64 s[0:1], 1, 0
-
+; GCN-NEXT: v_cmp_{{eq|ne}}_u32_e64
 ; GCN: s_cbranch_vccz [[BB2:BB[0-9]_[0-9]+]]
 
 ; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]:
@@ -493,11 +489,9 @@ ret:
 ; GCN: [[LONG_BR_DEST0]]
 
 ; GCN: s_cbranch_vccnz
-; GCN: s_cmp_lt_i32 [[SGPR1:s[0-9]+]], 1
-; GCN: s_cselect_b64 [[MASK1:s\[[0-9]+\:[0-9]+\]]], 1, 0
-; GCN: s_cmp_ge_i32 s{{[0-9]+}}, [[SGPR1]]
-; GCN: s_cselect_b64 [[MASK2:s\[[0-9]+\:[0-9]+\]]], 1, 0
-; GCN: s_and_b64 s{{\[[0-9]+\:[0-9]+\]}}, [[MASK2]], [[MASK1]]
+; GCN-DAG: v_cmp_lt_i32
+; GCN-DAG: v_cmp_ge_i32
+
 ; GCN: s_cbranch_vccz
 ; GCN: s_setpc_b64
 

diff --git a/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll b/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
index ddefa110902b..faa468c974c0 100644
--- a/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
+++ b/llvm/test/CodeGen/AMDGPU/cndmask-no-def-vcc.ll
@@ -5,8 +5,7 @@ declare i1 @llvm.amdgcn.class.f32(float, i32)
 ; Produces error after adding an implicit def to v_cndmask_b32
 
 ; GCN-LABEL: {{^}}vcc_shrink_vcc_def:
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0{{$}}
-; GCN: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, vcc
 define amdgpu_kernel void @vcc_shrink_vcc_def(float %arg, i32 %arg1, float %arg2, i32 %arg3) {
 bb0:

diff --git a/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll b/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
index 58cdfafd0c64..d81d05f50f44 100644
--- a/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
+++ b/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
@@ -10,8 +10,7 @@
 ; GCN: s_branch
 
 ; GCN-DAG: v_cmp_lt_i32
-; GCN-DAG: s_cmp_gt_i32
-; GCN-DAG: s_cselect_b64
+; GCN-DAG: v_cmp_gt_i32
 ; GCN: s_and_b64
 ; GCN: s_mov_b64 exec
 

diff --git a/llvm/test/CodeGen/AMDGPU/ctlz.ll b/llvm/test/CodeGen/AMDGPU/ctlz.ll
index 2036a6a3d192..dc8d630bc7d4 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz.ll
@@ -25,10 +25,9 @@ define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val)
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_flbit_i32_b32 s0, s2
-; SI-NEXT:    s_cmp_lg_u32 s2, 0
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s2, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -41,9 +40,8 @@ define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val)
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_flbit_i32_b32 s1, s0
-; VI-NEXT:    s_cmp_lg_u32 s0, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 32, v0, vcc
 ; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
@@ -381,19 +379,17 @@ define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32],
 ; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x13
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_flbit_i32_b32 s0, s2
 ; SI-NEXT:    s_flbit_i32_b32 s1, s3
 ; SI-NEXT:    s_add_i32 s0, s0, 32
-; SI-NEXT:    s_cmp_eq_u32 s3, 0
-; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_or_b32 s2, s2, s3
 ; SI-NEXT:    v_mov_b32_e32 v0, s1
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_or_b32 s0, s2, s3
-; SI-NEXT:    s_cmp_lg_u32 s0, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s2, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, 0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@@ -407,16 +403,14 @@ define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32],
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_flbit_i32_b32 s2, s0
-; VI-NEXT:    s_add_i32 s2, s2, 32
-; VI-NEXT:    s_cmp_eq_u32 s1, 0
 ; VI-NEXT:    s_flbit_i32_b32 s3, s1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_or_b32 s0, s0, s1
+; VI-NEXT:    s_add_i32 s2, s2, 32
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
 ; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; VI-NEXT:    s_or_b32 s0, s0, s1
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v1, 0
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@@ -450,19 +444,17 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64
 ; SI-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xb
 ; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_flbit_i32_b32 s0, s2
 ; SI-NEXT:    s_flbit_i32_b32 s1, s3
 ; SI-NEXT:    s_add_i32 s0, s0, 32
-; SI-NEXT:    s_cmp_eq_u32 s3, 0
-; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_or_b32 s2, s2, s3
 ; SI-NEXT:    v_mov_b32_e32 v0, s1
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_or_b32 s0, s2, s3
-; SI-NEXT:    s_cmp_lg_u32 s0, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s2, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -475,16 +467,14 @@ define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_flbit_i32_b32 s2, s0
-; VI-NEXT:    s_add_i32 s2, s2, 32
-; VI-NEXT:    s_cmp_eq_u32 s1, 0
 ; VI-NEXT:    s_flbit_i32_b32 s3, s1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_or_b32 s0, s0, s1
+; VI-NEXT:    s_add_i32 s2, s2, 32
 ; VI-NEXT:    v_mov_b32_e32 v0, s3
 ; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_cmp_lg_u32 s0, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
+; VI-NEXT:    s_or_b32 s0, s0, s1
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s0, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 64, v0, vcc
 ; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index 37e8d506d4c1..081cb188cfe0 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -99,7 +99,7 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(i8 addrspace(1)* noalias %out, i
 
 ; FUNC-LABEL: {{^}}s_ctlz_zero_undef_i64:
 ; GCN: s_load_dwordx2 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
-; GCN-DAG: s_cmp_eq_u32 s[[HI]], 0{{$}}
+; GCN-DAG: v_cmp_eq_u32_e64 vcc, s[[HI]], 0{{$}}
 ; GCN-DAG: s_flbit_i32_b32 [[FFBH_LO:s[0-9]+]], s[[LO]]
 ; GCN-DAG: s_add_i32 [[ADD:s[0-9]+]], [[FFBH_LO]], 32
 ; GCN-DAG: s_flbit_i32_b32 [[FFBH_HI:s[0-9]+]], s[[HI]]

diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
index a23e862c69bc..1ec749059452 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_dynelt.ll
@@ -2,12 +2,9 @@
 
 ; GCN-LABEL: {{^}}float4_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_ne_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_ne_u32_e64 [[C3:[^,]+]], [[IDX]], 3
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1.0, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], 2.0, [[V1]], [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], 4.0, [[V2]], [[C3]]
@@ -21,12 +18,9 @@ entry:
 
 ; GCN-LABEL: {{^}}int4_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_ne_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_ne_u32_e64 [[C3:[^,]+]], [[IDX]], 3
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], 2, [[V1]], [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], 4, [[V2]], [[C3]]
@@ -40,12 +34,9 @@ entry:
 
 ; GCN-LABEL: {{^}}double4_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_eq_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_eq_u32_e64 [[C3:[^,]+]], [[IDX]], 3
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C3]]
@@ -59,14 +50,10 @@ entry:
 
 ; GCN-LABEL: {{^}}double5_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_eq_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_eq_u32_e64 [[C3:[^,]+]], [[IDX]], 3
+; GCN-DAG: v_cmp_eq_u32_e64 [[C4:[^,]+]], [[IDX]], 4
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C3]]
@@ -96,8 +83,7 @@ entry:
 
 ; GCN-LABEL: {{^}}float2_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1.0, [[C1]]
 ; GCN: store_dword v[{{[0-9:]+}}], [[V1]]
 define amdgpu_kernel void @float2_extelt(float addrspace(1)* %out, i32 %sel) {
@@ -109,8 +95,7 @@ entry:
 
 ; GCN-LABEL: {{^}}double2_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN: store_dwordx2 v[{{[0-9:]+}}]
@@ -123,20 +108,13 @@ entry:
 
 ; GCN-LABEL: {{^}}half8_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 5
-; GCN-DAG: s_cselect_b64 [[C5:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 6
-; GCN-DAG: s_cselect_b64 [[C6:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 7
-; GCN-DAG: s_cselect_b64 [[C7:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_ne_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_ne_u32_e64 [[C3:[^,]+]], [[IDX]], 3
+; GCN-DAG: v_cmp_ne_u32_e64 [[C4:[^,]+]], [[IDX]], 4
+; GCN-DAG: v_cmp_ne_u32_e64 [[C5:[^,]+]], [[IDX]], 5
+; GCN-DAG: v_cmp_ne_u32_e64 [[C6:[^,]+]], [[IDX]], 6
+; GCN-DAG: v_cmp_ne_u32_e64 [[C7:[^,]+]], [[IDX]], 7
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], {{[^,]+}}, [[V1]], [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], {{[^,]+}}, [[V2]], [[C3]]
@@ -154,20 +132,13 @@ entry:
 
 ; GCN-LABEL: {{^}}short8_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 5
-; GCN-DAG: s_cselect_b64 [[C5:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 6
-; GCN-DAG: s_cselect_b64 [[C6:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 7
-; GCN-DAG: s_cselect_b64 [[C7:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_ne_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_ne_u32_e64 [[C3:[^,]+]], [[IDX]], 3
+; GCN-DAG: v_cmp_ne_u32_e64 [[C4:[^,]+]], [[IDX]], 4
+; GCN-DAG: v_cmp_ne_u32_e64 [[C5:[^,]+]], [[IDX]], 5
+; GCN-DAG: v_cmp_ne_u32_e64 [[C6:[^,]+]], [[IDX]], 6
+; GCN-DAG: v_cmp_ne_u32_e64 [[C7:[^,]+]], [[IDX]], 7
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], {{[^,]+}}, [[V1]], [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], {{[^,]+}}, [[V2]], [[C3]]
@@ -185,20 +156,13 @@ entry:
 
 ; GCN-LABEL: {{^}}float8_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 5
-; GCN-DAG: s_cselect_b64 [[C5:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 6
-; GCN-DAG: s_cselect_b64 [[C6:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 7
-; GCN-DAG: s_cselect_b64 [[C7:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_ne_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_ne_u32_e64 [[C3:[^,]+]], [[IDX]], 3
+; GCN-DAG: v_cmp_ne_u32_e64 [[C4:[^,]+]], [[IDX]], 4
+; GCN-DAG: v_cmp_ne_u32_e64 [[C5:[^,]+]], [[IDX]], 5
+; GCN-DAG: v_cmp_ne_u32_e64 [[C6:[^,]+]], [[IDX]], 6
+; GCN-DAG: v_cmp_ne_u32_e64 [[C7:[^,]+]], [[IDX]], 7
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], {{[^,]+}}, [[V1]], [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], {{[^,]+}}, [[V2]], [[C3]]
@@ -367,36 +331,21 @@ entry:
 
 ; GCN-LABEL: {{^}}byte16_extelt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 4
-; GCN-DAG: s_cselect_b64 [[C4:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 5
-; GCN-DAG: s_cselect_b64 [[C5:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 6
-; GCN-DAG: s_cselect_b64 [[C6:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 7
-; GCN-DAG: s_cselect_b64 [[C7:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 8
-; GCN-DAG: s_cselect_b64 [[C8:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 9
-; GCN-DAG: s_cselect_b64 [[C9:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 10
-; GCN-DAG: s_cselect_b64 [[C10:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 11
-; GCN-DAG: s_cselect_b64 [[C11:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 12
-; GCN-DAG: s_cselect_b64 [[C12:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 13
-; GCN-DAG: s_cselect_b64 [[C13:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 14
-; GCN-DAG: s_cselect_b64 [[C14:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 [[IDX]], 15
-; GCN-DAG: s_cselect_b64 [[C15:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_ne_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_ne_u32_e64 [[C3:[^,]+]], [[IDX]], 3
+; GCN-DAG: v_cmp_ne_u32_e64 [[C4:[^,]+]], [[IDX]], 4
+; GCN-DAG: v_cmp_ne_u32_e64 [[C5:[^,]+]], [[IDX]], 5
+; GCN-DAG: v_cmp_ne_u32_e64 [[C6:[^,]+]], [[IDX]], 6
+; GCN-DAG: v_cmp_ne_u32_e64 [[C7:[^,]+]], [[IDX]], 7
+; GCN-DAG: v_cmp_ne_u32_e64 [[C8:[^,]+]], [[IDX]], 8
+; GCN-DAG: v_cmp_ne_u32_e64 [[C9:[^,]+]], [[IDX]], 9
+; GCN-DAG: v_cmp_ne_u32_e64 [[C10:[^,]+]], [[IDX]], 10
+; GCN-DAG: v_cmp_ne_u32_e64 [[C11:[^,]+]], [[IDX]], 11
+; GCN-DAG: v_cmp_ne_u32_e64 [[C12:[^,]+]], [[IDX]], 12
+; GCN-DAG: v_cmp_ne_u32_e64 [[C13:[^,]+]], [[IDX]], 13
+; GCN-DAG: v_cmp_ne_u32_e64 [[C14:[^,]+]], [[IDX]], 14
+; GCN-DAG: v_cmp_ne_u32_e64 [[C15:[^,]+]], [[IDX]], 15
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], {{[^,]+}}, {{[^,]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V2:v[0-9]+]], {{[^,]+}}, [[V1]], [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V3:v[0-9]+]], {{[^,]+}}, [[V2]], [[C3]]
@@ -441,9 +390,9 @@ entry:
 ; GCN-LABEL: {{^}}bit128_extelt:
 ; GCN-NOT: buffer_
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} [[V1:v[0-9]+]], 0, 1
-; GCN-DAG: s_cmpk_lg_i32 s0, 0x7f
-; GCN:     s_cselect_b64 [[CL:[^,]+]], 1, 0
-; GCN:     v_cndmask_b32_e{{32|64}} [[VL:v[0-9]+]], 0, [[V1]], [[CL]]
+; GCN-DAG: v_mov_b32_e32 [[LASTIDX:v[0-9]+]], 0x7f
+; GCN-DAG: v_cmp_ne_u32_e32 [[CL:[^,]+]], s{{[0-9]+}}, [[LASTIDX]]
+; GCN-DAG: v_cndmask_b32_e{{32|64}} [[VL:v[0-9]+]], 0, [[V1]], [[CL]]
 ; GCN:     v_and_b32_e32 [[RES:v[0-9]+]], 1, [[VL]]
 ; GCN:     store_dword v[{{[0-9:]+}}], [[RES]]
 define amdgpu_kernel void @bit128_extelt(i32 addrspace(1)* %out, i32 %sel) {

diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
index 11241b36495b..1dc85cab8fc8 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll
@@ -14,10 +14,8 @@ define amdgpu_kernel void @extract_vector_elt_v3f64_2(double addrspace(1)* %out,
 
 ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3f64:
 ; GCN-NOT: buffer_load
-; GCN-DAG: s_cmp_eq_u32  [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32  [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_eq_u32_e64 [[C2:[^,]+]], [[IDX]], 2
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]
@@ -31,12 +29,9 @@ define amdgpu_kernel void @dyn_extract_vector_elt_v3f64(double addrspace(1)* %ou
 
 ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4f64:
 ; GCN-NOT: buffer_load
-; GCN-DAG: s_cmp_eq_u32  [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32  [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32  [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_eq_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_eq_u32_e64 [[C3:[^,]+]], [[IDX]], 3
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]

diff --git a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
index 3d06a994859f..62144ba04376 100644
--- a/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll
@@ -31,8 +31,7 @@ define amdgpu_kernel void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x
 
 ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64:
 ; GCN-NOT: buffer_load
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN: store_dwordx2 v[{{[0-9:]+}}]
@@ -45,8 +44,7 @@ define amdgpu_kernel void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out,
 ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64_2:
 ; GCN:     buffer_load_dwordx4
 ; GCN-NOT: buffer_load
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN: store_dwordx2 v[{{[0-9:]+}}]
@@ -60,10 +58,8 @@ define amdgpu_kernel void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out
 
 ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3i64:
 ; GCN-NOT: buffer_load
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_eq_u32_e64 [[C2:[^,]+]], [[IDX]], 2
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
@@ -77,12 +73,9 @@ define amdgpu_kernel void @dyn_extract_vector_elt_v3i64(i64 addrspace(1)* %out,
 
 ; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4i64:
 ; GCN-NOT: buffer_load
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[C1:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 2
-; GCN-DAG: s_cselect_b64 [[C2:[^,]+]], 1, 0
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 3
-; GCN-DAG: s_cselect_b64 [[C3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[C1:[^,]+]], [[IDX:s[0-9]+]], 1
+; GCN-DAG: v_cmp_eq_u32_e64 [[C2:[^,]+]], [[IDX]], 2
+; GCN-DAG: v_cmp_eq_u32_e64 [[C3:[^,]+]], [[IDX]], 3
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C2]]
 ; GCN-DAG: v_cndmask_b32_e{{32|64}} v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[C1]]

diff --git a/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll b/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
index c022ed85b170..1165cd82a299 100644
--- a/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
+++ b/llvm/test/CodeGen/AMDGPU/extractelt-to-trunc.ll
@@ -147,26 +147,23 @@ define amdgpu_kernel void @no_extract_volatile_load_dynextract(i32 addrspace(1)*
 ; GCN-LABEL: no_extract_volatile_load_dynextract:
 ; GCN:       ; %bb.0: ; %entry
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s12, s[0:1], 0xd
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_load_dword s12, s[0:1], 0xd
 ; GCN-NEXT:    s_mov_b32 s10, s2
 ; GCN-NEXT:    s_mov_b32 s11, s3
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_mov_b32 s8, s6
 ; GCN-NEXT:    s_mov_b32 s9, s7
 ; GCN-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
-; GCN-NEXT:    s_cmp_eq_u32 s12, 1
 ; GCN-NEXT:    s_mov_b32 s0, s4
 ; GCN-NEXT:    s_mov_b32 s1, s5
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s12, 2
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 1
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s12, 3
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 2
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 3
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/fshl.ll b/llvm/test/CodeGen/AMDGPU/fshl.ll
index 983223acae46..0f2d16ebcc2c 100644
--- a/llvm/test/CodeGen/AMDGPU/fshl.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshl.ll
@@ -18,12 +18,11 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_sub_i32 s3, 32, s2
 ; SI-NEXT:    v_mov_b32_e32 v0, s1
-; SI-NEXT:    s_and_b32 s1, s2, 31
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_cmp_eq_u32 s1, 0
+; SI-NEXT:    s_and_b32 s1, s2, 31
 ; SI-NEXT:    v_alignbit_b32 v0, s0, v0, v1
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -36,11 +35,10 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; VI-NEXT:    s_sub_i32 s3, 32, s2
 ; VI-NEXT:    v_mov_b32_e32 v0, s1
 ; VI-NEXT:    s_and_b32 s1, s2, 31
-; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    s_cmp_eq_u32 s1, 0
-; VI-NEXT:    v_alignbit_b32 v0, s0, v0, v1
+; VI-NEXT:    v_mov_b32_e32 v2, s3
 ; VI-NEXT:    v_mov_b32_e32 v1, s0
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_alignbit_b32 v0, s0, v0, v2
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
@@ -55,11 +53,10 @@ define amdgpu_kernel void @fshl_i32(i32 addrspace(1)* %in, i32 %x, i32 %y, i32 %
 ; GFX9-NEXT:    s_sub_i32 s3, 32, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
 ; GFX9-NEXT:    s_and_b32 s1, s2, 31
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
-; GFX9-NEXT:    v_alignbit_b32 v0, s0, v0, v1
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s0
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_alignbit_b32 v0, s0, v0, v2
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
@@ -153,21 +150,19 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
 ; SI-NEXT:    s_sub_i32 s10, 32, s1
-; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_mov_b32_e32 v1, s10
-; SI-NEXT:    s_cmp_eq_u32 s1, 0
+; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_alignbit_b32 v0, s3, v0, v1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    s_sub_i32 s1, 32, s0
-; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
-; SI-NEXT:    s_cmp_eq_u32 s0, 0
+; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_mov_b32_e32 v2, s1
 ; SI-NEXT:    v_alignbit_b32 v0, s2, v0, v2
 ; SI-NEXT:    v_mov_b32_e32 v2, s2
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -181,21 +176,19 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
 ; VI-NEXT:    s_sub_i32 s8, 32, s1
-; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_mov_b32_e32 v1, s8
-; VI-NEXT:    s_cmp_eq_u32 s1, 0
+; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    s_sub_i32 s1, 32, s0
-; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
-; VI-NEXT:    s_cmp_eq_u32 s0, 0
+; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_mov_b32_e32 v0, s6
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_alignbit_b32 v0, s4, v0, v2
 ; VI-NEXT:    v_mov_b32_e32 v2, s4
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
@@ -211,21 +204,19 @@ define amdgpu_kernel void @fshl_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s7
 ; GFX9-NEXT:    s_sub_i32 s8, 32, s1
-; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s8
-; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX9-NEXT:    s_sub_i32 s1, 32, s0
-; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
+; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s1
 ; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, v2
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s3
@@ -336,39 +327,35 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s15
 ; SI-NEXT:    s_sub_i32 s16, 32, s3
-; SI-NEXT:    s_and_b32 s3, s3, 31
 ; SI-NEXT:    v_mov_b32_e32 v1, s16
-; SI-NEXT:    s_cmp_eq_u32 s3, 0
+; SI-NEXT:    s_and_b32 s3, s3, 31
 ; SI-NEXT:    v_alignbit_b32 v0, s11, v0, v1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
 ; SI-NEXT:    s_sub_i32 s3, 32, s2
-; SI-NEXT:    s_and_b32 s2, s2, 31
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; SI-NEXT:    s_cmp_eq_u32 s2, 0
+; SI-NEXT:    s_and_b32 s2, s2, 31
 ; SI-NEXT:    v_mov_b32_e32 v0, s14
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
 ; SI-NEXT:    v_alignbit_b32 v0, s10, v0, v1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s10
 ; SI-NEXT:    s_sub_i32 s2, 32, s1
-; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SI-NEXT:    s_cmp_eq_u32 s1, 0
+; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_mov_b32_e32 v0, s13
 ; SI-NEXT:    v_mov_b32_e32 v1, s2
 ; SI-NEXT:    v_alignbit_b32 v0, s9, v0, v1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
 ; SI-NEXT:    s_sub_i32 s1, 32, s0
-; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
-; SI-NEXT:    s_cmp_eq_u32 s0, 0
+; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_mov_b32_e32 v0, s12
 ; SI-NEXT:    v_mov_b32_e32 v4, s1
 ; SI-NEXT:    v_alignbit_b32 v0, s8, v0, v4
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -382,39 +369,35 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
 ; VI-NEXT:    s_sub_i32 s14, 32, s3
-; VI-NEXT:    s_and_b32 s3, s3, 31
 ; VI-NEXT:    v_mov_b32_e32 v1, s14
-; VI-NEXT:    s_cmp_eq_u32 s3, 0
+; VI-NEXT:    s_and_b32 s3, s3, 31
 ; VI-NEXT:    v_alignbit_b32 v0, s7, v0, v1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s7
 ; VI-NEXT:    s_sub_i32 s3, 32, s2
-; VI-NEXT:    s_and_b32 s2, s2, 31
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; VI-NEXT:    s_cmp_eq_u32 s2, 0
+; VI-NEXT:    s_and_b32 s2, s2, 31
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
 ; VI-NEXT:    v_alignbit_b32 v0, s6, v0, v1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s6
 ; VI-NEXT:    s_sub_i32 s2, 32, s1
-; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; VI-NEXT:    s_cmp_eq_u32 s1, 0
+; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
 ; VI-NEXT:    v_mov_b32_e32 v1, s2
 ; VI-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
 ; VI-NEXT:    s_sub_i32 s1, 32, s0
-; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
-; VI-NEXT:    s_cmp_eq_u32 s0, 0
+; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
 ; VI-NEXT:    v_mov_b32_e32 v4, s1
 ; VI-NEXT:    v_alignbit_b32 v0, s4, v0, v4
 ; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s12
 ; VI-NEXT:    v_mov_b32_e32 v5, s13
@@ -430,39 +413,35 @@ define amdgpu_kernel void @fshl_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s11
 ; GFX9-NEXT:    s_sub_i32 s14, 32, s3
-; GFX9-NEXT:    s_and_b32 s3, s3, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s14
-; GFX9-NEXT:    s_cmp_eq_u32 s3, 0
+; GFX9-NEXT:    s_and_b32 s3, s3, 31
 ; GFX9-NEXT:    v_alignbit_b32 v0, s7, v0, v1
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    s_sub_i32 s3, 32, s2
-; GFX9-NEXT:    s_and_b32 s2, s2, 31
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 0
+; GFX9-NEXT:    s_and_b32 s2, s2, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9-NEXT:    v_alignbit_b32 v0, s6, v0, v1
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX9-NEXT:    s_sub_i32 s2, 32, s1
-; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
+; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s9
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s2
 ; GFX9-NEXT:    v_alignbit_b32 v0, s5, v0, v1
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX9-NEXT:    s_sub_i32 s1, 32, s0
-; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
+; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s1
 ; GFX9-NEXT:    v_alignbit_b32 v0, s4, v0, v4
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s4
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s13

diff --git a/llvm/test/CodeGen/AMDGPU/fshr.ll b/llvm/test/CodeGen/AMDGPU/fshr.ll
index 843cd40c144b..e27cbe73a0bd 100644
--- a/llvm/test/CodeGen/AMDGPU/fshr.ll
+++ b/llvm/test/CodeGen/AMDGPU/fshr.ll
@@ -140,16 +140,14 @@ define amdgpu_kernel void @fshr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
 ; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    s_cmp_eq_u32 s1, 0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_alignbit_b32 v1, s3, v0, v1
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
-; SI-NEXT:    s_cmp_eq_u32 s0, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_mov_b32_e32 v2, s0
 ; SI-NEXT:    v_alignbit_b32 v2, s2, v0, v2
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -164,16 +162,14 @@ define amdgpu_kernel void @fshr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
 ; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_cmp_eq_u32 s1, 0
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_alignbit_b32 v1, s5, v0, v1
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
-; VI-NEXT:    s_cmp_eq_u32 s0, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s6
 ; VI-NEXT:    v_mov_b32_e32 v2, s0
 ; VI-NEXT:    v_alignbit_b32 v2, s4, v0, v2
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
@@ -190,16 +186,14 @@ define amdgpu_kernel void @fshr_v2i32(<2 x i32> addrspace(1)* %in, <2 x i32> %x,
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s7
 ; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
 ; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, v1
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s0
 ; GFX9-NEXT:    v_alignbit_b32 v2, s4, v0, v2
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s3
@@ -309,30 +303,26 @@ define amdgpu_kernel void @fshr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; SI-NEXT:    v_mov_b32_e32 v0, s15
 ; SI-NEXT:    s_and_b32 s3, s3, 31
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    s_cmp_eq_u32 s3, 0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_and_b32 s2, s2, 31
 ; SI-NEXT:    v_alignbit_b32 v1, s11, v0, v1
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; SI-NEXT:    s_and_b32 s2, s2, 31
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v1, v0, vcc
-; SI-NEXT:    s_cmp_eq_u32 s2, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s14
 ; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_alignbit_b32 v1, s10, v0, v1
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; SI-NEXT:    s_and_b32 s1, s1, 31
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
-; SI-NEXT:    s_cmp_eq_u32 s1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s13
 ; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    s_and_b32 s0, s0, 31
 ; SI-NEXT:    v_alignbit_b32 v1, s9, v0, v1
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
-; SI-NEXT:    s_cmp_eq_u32 s0, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s12
 ; SI-NEXT:    v_mov_b32_e32 v4, s0
 ; SI-NEXT:    v_alignbit_b32 v4, s8, v0, v4
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -347,30 +337,26 @@ define amdgpu_kernel void @fshr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
 ; VI-NEXT:    s_and_b32 s3, s3, 31
 ; VI-NEXT:    v_mov_b32_e32 v1, s3
-; VI-NEXT:    s_cmp_eq_u32 s3, 0
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_and_b32 s2, s2, 31
 ; VI-NEXT:    v_alignbit_b32 v1, s7, v0, v1
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; VI-NEXT:    s_and_b32 s2, s2, 31
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v1, v0, vcc
-; VI-NEXT:    s_cmp_eq_u32 s2, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
 ; VI-NEXT:    v_mov_b32_e32 v1, s2
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_alignbit_b32 v1, s6, v0, v1
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; VI-NEXT:    s_and_b32 s1, s1, 31
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
-; VI-NEXT:    s_cmp_eq_u32 s1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    s_and_b32 s0, s0, 31
 ; VI-NEXT:    v_alignbit_b32 v1, s5, v0, v1
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
-; VI-NEXT:    s_cmp_eq_u32 s0, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
 ; VI-NEXT:    v_mov_b32_e32 v4, s0
 ; VI-NEXT:    v_alignbit_b32 v4, s4, v0, v4
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s12
 ; VI-NEXT:    v_mov_b32_e32 v5, s13
@@ -387,30 +373,26 @@ define amdgpu_kernel void @fshr_v4i32(<4 x i32> addrspace(1)* %in, <4 x i32> %x,
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s11
 ; GFX9-NEXT:    s_and_b32 s3, s3, 31
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    s_cmp_eq_u32 s3, 0
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
-; GFX9-NEXT:    s_and_b32 s2, s2, 31
 ; GFX9-NEXT:    v_alignbit_b32 v1, s7, v0, v1
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GFX9-NEXT:    s_and_b32 s2, s2, 31
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v1, v0, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s10
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s2
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
-; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_alignbit_b32 v1, s6, v0, v1
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; GFX9-NEXT:    s_and_b32 s1, s1, 31
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v1, v0, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s9
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
 ; GFX9-NEXT:    s_and_b32 s0, s0, 31
 ; GFX9-NEXT:    v_alignbit_b32 v1, s5, v0, v1
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v0, vcc
-; GFX9-NEXT:    s_cmp_eq_u32 s0, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX9-NEXT:    v_alignbit_b32 v4, s4, v0, v4
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX9-NEXT:    v_mov_b32_e32 v5, s13

diff --git a/llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll b/llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll
index 4011c9655e84..9e6bd99f99c6 100644
--- a/llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/i1-copy-from-loop.ll
@@ -4,53 +4,51 @@
 define amdgpu_ps void @i1_copy_from_loop(<4 x i32> inreg %rsrc, i32 %tid) {
 ; SI-LABEL: i1_copy_from_loop:
 ; SI:       ; %bb.0: ; %entry
-; SI-NEXT:    s_mov_b32 s8, 0
+; SI-NEXT:    s_mov_b32 s6, 0
 ; SI-NEXT:    s_mov_b64 s[4:5], 0
-; SI-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; SI-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; SI-NEXT:    ; implicit-def: $sgpr10_sgpr11
 ; SI-NEXT:    s_branch BB0_3
-; SI-NEXT:  BB0_1: ; in Loop: Header=BB0_3 Depth=1
-; SI-NEXT:    ; implicit-def: $sgpr8
+; SI-NEXT:  BB0_1: ; %Flow1
+; SI-NEXT:    ; in Loop: Header=BB0_3 Depth=1
+; SI-NEXT:    s_or_b64 exec, exec, s[14:15]
 ; SI-NEXT:  BB0_2: ; %Flow
 ; SI-NEXT:    ; in Loop: Header=BB0_3 Depth=1
 ; SI-NEXT:    s_and_b64 s[14:15], exec, s[10:11]
 ; SI-NEXT:    s_or_b64 s[4:5], s[14:15], s[4:5]
-; SI-NEXT:    s_andn2_b64 s[6:7], s[6:7], exec
+; SI-NEXT:    s_andn2_b64 s[8:9], s[8:9], exec
 ; SI-NEXT:    s_and_b64 s[12:13], s[12:13], exec
-; SI-NEXT:    s_or_b64 s[6:7], s[6:7], s[12:13]
+; SI-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
 ; SI-NEXT:    s_andn2_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_cbranch_execz BB0_7
+; SI-NEXT:    s_cbranch_execz BB0_6
 ; SI-NEXT:  BB0_3: ; %for.body
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_cmp_lt_u32 s8, 4
-; SI-NEXT:    s_cselect_b64 s[12:13], 1, 0
 ; SI-NEXT:    s_or_b64 s[10:11], s[10:11], exec
-; SI-NEXT:    s_cmp_gt_u32 s8, 3
-; SI-NEXT:    s_cbranch_scc1 BB0_1
+; SI-NEXT:    s_cmp_gt_u32 s6, 3
+; SI-NEXT:    v_cmp_lt_u32_e64 s[12:13], s6, 4
+; SI-NEXT:    s_cbranch_scc1 BB0_2
 ; SI-NEXT:  ; %bb.4: ; %mid.loop
 ; SI-NEXT:    ; in Loop: Header=BB0_3 Depth=1
-; SI-NEXT:    v_mov_b32_e32 v1, s8
+; SI-NEXT:    v_mov_b32_e32 v1, s6
 ; SI-NEXT:    buffer_load_dword v1, v[0:1], s[0:3], 0 idxen offen
 ; SI-NEXT:    s_mov_b64 s[12:13], -1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_cmp_le_f32_e32 vcc, 0, v1
 ; SI-NEXT:    s_mov_b64 s[10:11], -1
 ; SI-NEXT:    s_and_saveexec_b64 s[14:15], vcc
+; SI-NEXT:    s_cbranch_execz BB0_1
 ; SI-NEXT:  ; %bb.5: ; %end.loop
 ; SI-NEXT:    ; in Loop: Header=BB0_3 Depth=1
-; SI-NEXT:    s_add_i32 s8, s8, 1
+; SI-NEXT:    s_add_i32 s6, s6, 1
 ; SI-NEXT:    s_xor_b64 s[10:11], exec, -1
-; SI-NEXT:  ; %bb.6: ; %Flow1
-; SI-NEXT:    ; in Loop: Header=BB0_3 Depth=1
-; SI-NEXT:    s_or_b64 exec, exec, s[14:15]
-; SI-NEXT:    s_branch BB0_2
-; SI-NEXT:  BB0_7: ; %for.end
+; SI-NEXT:    s_branch BB0_1
+; SI-NEXT:  BB0_6: ; %for.end
 ; SI-NEXT:    s_or_b64 exec, exec, s[4:5]
-; SI-NEXT:    s_and_saveexec_b64 s[0:1], s[6:7]
-; SI-NEXT:    s_cbranch_execz BB0_9
-; SI-NEXT:  ; %bb.8: ; %if
+; SI-NEXT:    s_and_saveexec_b64 s[0:1], s[8:9]
+; SI-NEXT:    s_cbranch_execz BB0_8
+; SI-NEXT:  ; %bb.7: ; %if
 ; SI-NEXT:    exp mrt0 v0, v0, v0, v0 done vm
-; SI-NEXT:  BB0_9: ; %end
+; SI-NEXT:  BB0_8: ; %end
 ; SI-NEXT:    s_endpgm
 entry:
   br label %for.body

diff --git a/llvm/test/CodeGen/AMDGPU/icmp64.ll b/llvm/test/CodeGen/AMDGPU/icmp64.ll
index 9e1f310adc1e..3af74277df12 100644
--- a/llvm/test/CodeGen/AMDGPU/icmp64.ll
+++ b/llvm/test/CodeGen/AMDGPU/icmp64.ll
@@ -1,9 +1,8 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
 
-; GCN-LABEL: {{^}}test_i64_eq:
+; SI-LABEL: {{^}}test_i64_eq:
 ; SI: v_cmp_eq_u64
-; VI: s_cmp_eq_u64
 define amdgpu_kernel void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp eq i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -13,7 +12,6 @@ define amdgpu_kernel void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) n
 
 ; SI-LABEL: {{^}}test_i64_ne:
 ; SI: v_cmp_ne_u64
-; VI: s_cmp_lg_u64
 define amdgpu_kernel void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp ne i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -21,8 +19,8 @@ define amdgpu_kernel void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) n
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_slt:
-; GCN: v_cmp_lt_i64
+; SI-LABEL: {{^}}test_i64_slt:
+; SI: v_cmp_lt_i64
 define amdgpu_kernel void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp slt i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -30,8 +28,8 @@ define amdgpu_kernel void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b)
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_ult:
-; GCN: v_cmp_lt_u64
+; SI-LABEL: {{^}}test_i64_ult:
+; SI: v_cmp_lt_u64
 define amdgpu_kernel void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp ult i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -39,8 +37,8 @@ define amdgpu_kernel void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b)
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_sle:
-; GCN: v_cmp_le_i64
+; SI-LABEL: {{^}}test_i64_sle:
+; SI: v_cmp_le_i64
 define amdgpu_kernel void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp sle i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -48,8 +46,8 @@ define amdgpu_kernel void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b)
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_ule:
-; GCN: v_cmp_le_u64
+; SI-LABEL: {{^}}test_i64_ule:
+; SI: v_cmp_le_u64
 define amdgpu_kernel void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp ule i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -57,8 +55,8 @@ define amdgpu_kernel void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b)
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_sgt:
-; GCN: v_cmp_gt_i64
+; SI-LABEL: {{^}}test_i64_sgt:
+; SI: v_cmp_gt_i64
 define amdgpu_kernel void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp sgt i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -66,8 +64,8 @@ define amdgpu_kernel void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b)
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_ugt:
-; GCN: v_cmp_gt_u64
+; SI-LABEL: {{^}}test_i64_ugt:
+; SI: v_cmp_gt_u64
 define amdgpu_kernel void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp ugt i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -75,8 +73,8 @@ define amdgpu_kernel void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b)
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_sge:
-; GCN: v_cmp_ge_i64
+; SI-LABEL: {{^}}test_i64_sge:
+; SI: v_cmp_ge_i64
 define amdgpu_kernel void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp sge i64 %a, %b
   %result = sext i1 %cmp to i32
@@ -84,8 +82,8 @@ define amdgpu_kernel void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b)
   ret void
 }
 
-; GCN-LABEL: {{^}}test_i64_uge:
-; GCN: v_cmp_ge_u64
+; SI-LABEL: {{^}}test_i64_uge:
+; SI: v_cmp_ge_u64
 define amdgpu_kernel void @test_i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
   %cmp = icmp uge i64 %a, %b
   %result = sext i1 %cmp to i32

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
index fd2c3f4cc1db..851d232e00a6 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_dynelt.ll
@@ -3,17 +3,13 @@
 ; GCN-LABEL: {{^}}float4_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 3
-; GCN-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 3
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 2
-; GCN-DAG: s_cselect_b64 [[CC2:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 2
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[CC3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX]], 1
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC3]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 0
-; GCN-DAG: s_cselect_b64 [[CC4:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC4:[^,]+]], [[IDX]], 0
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC4]]
 ; GCN:     flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST]]:[[ELT_LAST]]]
 define amdgpu_kernel void @float4_inselt(<4 x float> addrspace(1)* %out, <4 x float> %vec, i32 %sel) {
@@ -42,17 +38,13 @@ entry:
 ; GCN-LABEL: {{^}}int4_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 3
-; GCN-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 3
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST:[0-9]+]], 1, v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 2
-; GCN-DAG: s_cselect_b64 [[CC2:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 2
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[CC3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX]], 1
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CC3]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 0
-; GCN-DAG: s_cselect_b64 [[CC4:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC4:[^,]+]], [[IDX]], 0
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST:[0-9]+]], 1, v{{[0-9]+}}, [[CC4]]
 ; GCN:     flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST]]:[[ELT_LAST]]]
 define amdgpu_kernel void @int4_inselt(<4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %sel) {
@@ -65,11 +57,9 @@ entry:
 ; GCN-LABEL: {{^}}float2_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 1
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 0
-; GCN-DAG: s_cselect_b64 [[CC2:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 0
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC2]]
 ; GCN:     flat_store_dwordx2 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST]]:[[ELT_LAST]]]
 define amdgpu_kernel void @float2_inselt(<2 x float> addrspace(1)* %out, <2 x float> %vec, i32 %sel) {
@@ -82,29 +72,21 @@ entry:
 ; GCN-LABEL: {{^}}float8_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 3
-; GCN-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 3
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST0:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC1]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 2
-; GCN-DAG: s_cselect_b64 [[CC2:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC2:[^,]+]], [[IDX]], 2
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC2]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[CC3:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC3:[^,]+]], [[IDX]], 1
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC3]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 0
-; GCN-DAG: s_cselect_b64 [[CC4:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC4:[^,]+]], [[IDX]], 0
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST0:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC4]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 7
-; GCN-DAG: s_cselect_b64 [[CC5:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC5:[^,]+]], [[IDX:s[0-9]+]], 7
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_LAST1:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC5]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 6
-; GCN-DAG: s_cselect_b64 [[CC6:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC6:[^,]+]], [[IDX]], 6
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC6]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 5
-; GCN-DAG: s_cselect_b64 [[CC7:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC7:[^,]+]], [[IDX]], 5
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1.0, v{{[0-9]+}}, [[CC7]]
-; GCN-DAG: s_cmp_lg_u32 [[IDX:s[0-9]+]], 4
-; GCN-DAG: s_cselect_b64 [[CC8:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC8:[^,]+]], [[IDX]], 4
 ; GCN-DAG: v_cndmask_b32_e32 v[[ELT_FIRST1:[0-9]+]], 1.0, v{{[0-9]+}}, [[CC8]]
 ; GCN-DAG: flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST0]]:[[ELT_LAST0]]]
 ; GCN-DAG: flat_store_dwordx4 v[{{[0-9:]+}}], v{{\[}}[[ELT_FIRST1]]:[[ELT_LAST1]]]
@@ -167,22 +149,14 @@ entry:
 ; GCN-LABEL: {{^}}half8_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 0
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 1
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 2
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 3
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 4
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 5
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 6
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 7
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 0
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 1
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 2
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 3
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 4
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 5
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 6
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 7
 ; GCN-DAG: v_cndmask_b32_e32
 ; GCN-DAG: v_cndmask_b32_e32
 ; GCN-DAG: v_cndmask_b32_e32
@@ -254,10 +228,8 @@ entry:
 ; GCN-LABEL: {{^}}byte16_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 0
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
-; GCN-DAG: s_cmp_lg_u32 {{s[0-9]+}}, 15
-; GCN-DAG: s_cselect_b64 {{[^,]+}}, 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 0
+; GCN-DAG: v_cmp_ne_u32_e64 {{[^,]+}}, {{s[0-9]+}}, 15
 ; GCN-DAG: v_cndmask_b32_e32
 ; GCN-DAG: v_cndmask_b32_e32
 ; GCN-DAG: v_cndmask_b32_e32
@@ -292,12 +264,10 @@ entry:
 ; GCN-LABEL: {{^}}double2_inselt:
 ; GCN-NOT: v_movrel
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_eq_u32 [[IDX:s[0-9]+]], 1
-; GCN-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[CC1:[^,]+]], [[IDX:s[0-9]+]], 1
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC1]]
 ; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC1]]
-; GCN-DAG: s_cmp_eq_u32 [[IDX]], 0
-; GCN-DAG: s_cselect_b64 [[CC2:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[CC2:[^,]+]], [[IDX]], 0
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC2]]
 ; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, 0, [[CC2]]
 define amdgpu_kernel void @double2_inselt(<2 x double> addrspace(1)* %out, <2 x double> %vec, i32 %sel) {
@@ -393,12 +363,10 @@ entry:
 
 ; GCN-LABEL: {{^}}bit128_inselt:
 ; GCN-NOT: buffer_
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 0
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CC1]]
-
-; GCN-DAG: s_cmpk_lg_i32 s{{[0-9]+}}, 0x7f
-; GCN-DAG: s_cselect_b64 [[CCL:[^,]+]], 1, 0
+; GCN-DAG: v_mov_b32_e32 [[LASTIDX:v[0-9]+]], 0x7f
+; GCN-DAG: v_cmp_ne_u32_e32 [[CCL:[^,]+]], s{{[0-9]+}}, [[LASTIDX]]
 ; GCN-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, 1, v{{[0-9]+}}, [[CCL]]
 define amdgpu_kernel void @bit128_inselt(<128 x i1> addrspace(1)* %out, <128 x i1> %vec, i32 %sel) {
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 74d8c9a23641..74fa324c6d05 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -290,12 +290,10 @@ define amdgpu_kernel void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)*
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v2, s6
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -310,12 +308,10 @@ define amdgpu_kernel void @dynamic_insertelement_v2f32(<2 x float> addrspace(1)*
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v1, s7
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v2, s6
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
@@ -335,16 +331,13 @@ define amdgpu_kernel void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)*
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v1, s10
-; SI-NEXT:    s_cmp_lg_u32 s4, 2
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v3, s8
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
 ; SI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -359,16 +352,13 @@ define amdgpu_kernel void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)*
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v1, s10
-; VI-NEXT:    s_cmp_lg_u32 s4, 2
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s9
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v3, s8
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
 ; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
@@ -388,20 +378,16 @@ define amdgpu_kernel void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)*
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    s_cmp_lg_u32 s4, 3
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 2
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s10
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -416,20 +402,16 @@ define amdgpu_kernel void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)*
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v1, s11
-; VI-NEXT:    s_cmp_lg_u32 s4, 3
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_cmp_lg_u32 s4, 2
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s10
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s9
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s8
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
@@ -449,36 +431,28 @@ define amdgpu_kernel void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)*
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    s_cmp_lg_u32 s4, 3
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 2
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v4, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v4, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v4, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    s_cmp_lg_u32 s4, 7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v5, s15
-; SI-NEXT:    s_cmp_lg_u32 s4, 6
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
 ; SI-NEXT:    v_cndmask_b32_e32 v7, v4, v5, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v5, s14
-; SI-NEXT:    s_cmp_lg_u32 s4, 5
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
 ; SI-NEXT:    v_cndmask_b32_e32 v6, v4, v5, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v5, s13
-; SI-NEXT:    s_cmp_lg_u32 s4, 4
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
 ; SI-NEXT:    v_cndmask_b32_e32 v5, v4, v5, vcc
 ; SI-NEXT:    v_mov_b32_e32 v8, s12
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -494,36 +468,28 @@ define amdgpu_kernel void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)*
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    s_cmp_lg_u32 s4, 3
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_cmp_lg_u32 s4, 2
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v4, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v4, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v4, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    s_cmp_lg_u32 s4, 7
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v5, s15
-; VI-NEXT:    s_cmp_lg_u32 s4, 6
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
 ; VI-NEXT:    v_cndmask_b32_e32 v7, v4, v5, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v5, s14
-; VI-NEXT:    s_cmp_lg_u32 s4, 5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
 ; VI-NEXT:    v_cndmask_b32_e32 v6, v4, v5, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v5, s13
-; VI-NEXT:    s_cmp_lg_u32 s4, 4
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
 ; VI-NEXT:    v_cndmask_b32_e32 v5, v4, v5, vcc
 ; VI-NEXT:    v_mov_b32_e32 v8, s12
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
 ; VI-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -614,12 +580,10 @@ define amdgpu_kernel void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s6
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -633,12 +597,10 @@ define amdgpu_kernel void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s7
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s6
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
@@ -657,16 +619,13 @@ define amdgpu_kernel void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_lg_u32 s4, 2
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; SI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -680,16 +639,13 @@ define amdgpu_kernel void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_lg_u32 s4, 2
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; VI-NEXT:    buffer_store_dwordx3 v[0:2], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
@@ -709,21 +665,17 @@ define amdgpu_kernel void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    s_cmp_eq_u32 s6, 3
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 3
 ; SI-NEXT:    v_mov_b32_e32 v4, s4
-; SI-NEXT:    s_cmp_eq_u32 s6, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_eq_u32 s6, 1
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v0, v4, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_cmp_eq_u32 s6, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
@@ -738,21 +690,17 @@ define amdgpu_kernel void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    s_cmp_eq_u32 s6, 3
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 3
 ; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    s_cmp_eq_u32 s6, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_eq_u32 s6, 1
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v2, v0, v4, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    s_cmp_eq_u32 s6, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s6, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; VI-NEXT:    s_endpgm
@@ -771,36 +719,28 @@ define amdgpu_kernel void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    s_cmp_lg_u32 s4, 3
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 2
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; SI-NEXT:    v_cndmask_b32_e32 v3, 5, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    s_cmp_lg_u32 s4, 7
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v4, s15
-; SI-NEXT:    s_cmp_lg_u32 s4, 6
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
 ; SI-NEXT:    v_cndmask_b32_e32 v7, 5, v4, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v4, s14
-; SI-NEXT:    s_cmp_lg_u32 s4, 5
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
 ; SI-NEXT:    v_cndmask_b32_e32 v6, 5, v4, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v4, s13
-; SI-NEXT:    s_cmp_lg_u32 s4, 4
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
 ; SI-NEXT:    v_cndmask_b32_e32 v5, 5, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, s12
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
 ; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
 ; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -815,36 +755,28 @@ define amdgpu_kernel void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    s_cmp_lg_u32 s4, 3
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_cmp_lg_u32 s4, 2
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; VI-NEXT:    v_cndmask_b32_e32 v3, 5, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    s_cmp_lg_u32 s4, 7
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v4, s15
-; VI-NEXT:    s_cmp_lg_u32 s4, 6
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
 ; VI-NEXT:    v_cndmask_b32_e32 v7, 5, v4, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v4, s14
-; VI-NEXT:    s_cmp_lg_u32 s4, 5
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
 ; VI-NEXT:    v_cndmask_b32_e32 v6, 5, v4, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v4, s13
-; VI-NEXT:    s_cmp_lg_u32 s4, 4
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
 ; VI-NEXT:    v_cndmask_b32_e32 v5, 5, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s12
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
 ; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
 ; VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:16
 ; VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
@@ -1195,112 +1127,96 @@ define amdgpu_kernel void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_lshr_b32 s5, s11, 24
-; SI-NEXT:    s_cmp_lg_u32 s4, 15
 ; SI-NEXT:    v_mov_b32_e32 v0, s5
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 15
 ; SI-NEXT:    s_lshr_b32 s5, s11, 16
-; SI-NEXT:    s_cmp_lg_u32 s4, 14
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_lshr_b32 s6, s11, 8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 14
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
 ; SI-NEXT:    s_movk_i32 s5, 0xff
-; SI-NEXT:    s_cmp_lg_u32 s4, 13
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
 ; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    s_lshr_b32 s6, s11, 8
 ; SI-NEXT:    v_or_b32_e32 v0, v1, v0
 ; SI-NEXT:    v_mov_b32_e32 v1, s6
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lg_u32 s4, 12
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 13
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v2, s11
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 12
 ; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
 ; SI-NEXT:    v_and_b32_e32 v2, s5, v2
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
 ; SI-NEXT:    s_mov_b32 s6, 0xffff
-; SI-NEXT:    s_lshr_b32 s7, s10, 24
-; SI-NEXT:    s_cmp_lg_u32 s4, 11
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; SI-NEXT:    v_and_b32_e32 v1, s6, v1
+; SI-NEXT:    s_lshr_b32 s7, s10, 24
 ; SI-NEXT:    v_or_b32_e32 v3, v1, v0
 ; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 11
 ; SI-NEXT:    s_lshr_b32 s7, s10, 16
-; SI-NEXT:    s_cmp_lg_u32 s4, 10
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 10
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    s_lshr_b32 s7, s10, 8
-; SI-NEXT:    s_cmp_lg_u32 s4, 9
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
 ; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    s_lshr_b32 s7, s10, 8
 ; SI-NEXT:    v_or_b32_e32 v0, v1, v0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    s_cmp_lg_u32 s4, 8
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 9
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v2, s10
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 8
 ; SI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
 ; SI-NEXT:    v_and_b32_e32 v2, s5, v2
 ; SI-NEXT:    v_or_b32_e32 v1, v2, v1
-; SI-NEXT:    s_lshr_b32 s7, s9, 24
-; SI-NEXT:    s_cmp_lg_u32 s4, 7
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; SI-NEXT:    v_and_b32_e32 v1, s6, v1
+; SI-NEXT:    s_lshr_b32 s7, s9, 24
 ; SI-NEXT:    v_or_b32_e32 v2, v1, v0
 ; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
 ; SI-NEXT:    s_lshr_b32 s7, s9, 16
-; SI-NEXT:    s_cmp_lg_u32 s4, 6
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    s_lshr_b32 s7, s9, 8
-; SI-NEXT:    s_cmp_lg_u32 s4, 5
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
 ; SI-NEXT:    v_and_b32_e32 v1, s5, v1
+; SI-NEXT:    s_lshr_b32 s7, s9, 8
 ; SI-NEXT:    v_or_b32_e32 v0, v1, v0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v1, s7
-; SI-NEXT:    s_cmp_lg_u32 s4, 4
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
 ; SI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v4, s9
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
 ; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
 ; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v1
 ; SI-NEXT:    v_and_b32_e32 v4, s5, v4
 ; SI-NEXT:    v_or_b32_e32 v1, v4, v1
-; SI-NEXT:    s_lshr_b32 s7, s8, 24
-; SI-NEXT:    s_cmp_lg_u32 s4, 3
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; SI-NEXT:    v_and_b32_e32 v1, s6, v1
+; SI-NEXT:    s_lshr_b32 s7, s8, 24
 ; SI-NEXT:    v_or_b32_e32 v1, v1, v0
 ; SI-NEXT:    v_mov_b32_e32 v0, s7
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; SI-NEXT:    s_lshr_b32 s7, s8, 16
-; SI-NEXT:    s_cmp_lg_u32 s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, s7
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
-; SI-NEXT:    s_lshr_b32 s7, s8, 8
-; SI-NEXT:    s_cmp_lg_u32 s4, 1
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 8, v0
 ; SI-NEXT:    v_and_b32_e32 v4, s5, v4
+; SI-NEXT:    s_lshr_b32 s7, s8, 8
 ; SI-NEXT:    v_or_b32_e32 v0, v4, v0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v4, s7
-; SI-NEXT:    s_cmp_lg_u32 s4, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v5, s8
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v5, 5, v5, vcc
 ; SI-NEXT:    v_lshlrev_b32_e32 v4, 8, v4
 ; SI-NEXT:    v_and_b32_e32 v5, s5, v5
@@ -1320,97 +1236,81 @@ define amdgpu_kernel void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_lshr_b32 s5, s11, 24
-; VI-NEXT:    s_cmp_lg_u32 s4, 15
 ; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 15
 ; VI-NEXT:    s_lshr_b32 s5, s11, 16
-; VI-NEXT:    s_cmp_lg_u32 s4, 14
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_lshr_b32 s5, s11, 8
-; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 14
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; VI-NEXT:    s_cmp_lg_u32 s4, 13
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s11, 8
 ; VI-NEXT:    v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    s_cmp_lg_u32 s4, 12
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 13
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v2, s11
-; VI-NEXT:    s_lshr_b32 s5, s10, 24
-; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 12
 ; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; VI-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT:    s_cmp_lg_u32 s4, 11
+; VI-NEXT:    s_lshr_b32 s5, s10, 24
 ; VI-NEXT:    v_or_b32_sdwa v3, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 11
 ; VI-NEXT:    s_lshr_b32 s5, s10, 16
-; VI-NEXT:    s_cmp_lg_u32 s4, 10
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_lshr_b32 s5, s10, 8
-; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 10
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; VI-NEXT:    s_cmp_lg_u32 s4, 9
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s10, 8
 ; VI-NEXT:    v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    s_cmp_lg_u32 s4, 8
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 9
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v2, s10
-; VI-NEXT:    s_lshr_b32 s5, s9, 24
-; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 8
 ; VI-NEXT:    v_cndmask_b32_e32 v2, 5, v2, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; VI-NEXT:    v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT:    s_cmp_lg_u32 s4, 7
+; VI-NEXT:    s_lshr_b32 s5, s9, 24
 ; VI-NEXT:    v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 7
 ; VI-NEXT:    s_lshr_b32 s5, s9, 16
-; VI-NEXT:    s_cmp_lg_u32 s4, 6
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_lshr_b32 s5, s9, 8
-; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 6
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; VI-NEXT:    s_cmp_lg_u32 s4, 5
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s9, 8
 ; VI-NEXT:    v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    s_cmp_lg_u32 s4, 4
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 5
 ; VI-NEXT:    v_cndmask_b32_e32 v1, 5, v1, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v4, s9
-; VI-NEXT:    s_lshr_b32 s5, s8, 24
-; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 4
 ; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
+; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v1
 ; VI-NEXT:    v_or_b32_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT:    s_cmp_lg_u32 s4, 3
+; VI-NEXT:    s_lshr_b32 s5, s8, 24
 ; VI-NEXT:    v_or_b32_sdwa v1, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
 ; VI-NEXT:    v_mov_b32_e32 v0, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 3
 ; VI-NEXT:    s_lshr_b32 s5, s8, 16
-; VI-NEXT:    s_cmp_lg_u32 s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v0, 5, v0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s5
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
-; VI-NEXT:    s_lshr_b32 s5, s8, 8
-; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
-; VI-NEXT:    s_cmp_lg_u32 s4, 1
+; VI-NEXT:    v_lshlrev_b16_e32 v0, 8, v0
+; VI-NEXT:    s_lshr_b32 s5, s8, 8
 ; VI-NEXT:    v_or_b32_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v4, s5
-; VI-NEXT:    s_cmp_lg_u32 s4, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v4, 5, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v5, s8
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_ne_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_lshlrev_b16_e32 v4, 8, v4
 ; VI-NEXT:    v_cndmask_b32_e32 v5, 5, v5, vcc
 ; VI-NEXT:    v_or_b32_sdwa v4, v5, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -1516,14 +1416,12 @@ define amdgpu_kernel void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    s_cmp_eq_u32 s4, 1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_eq_u32 s4, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -1540,14 +1438,12 @@ define amdgpu_kernel void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    s_cmp_eq_u32 s4, 1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_eq_u32 s4, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -1568,14 +1464,12 @@ define amdgpu_kernel void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    s_cmp_eq_u32 s6, 1
-; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
 ; SI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_eq_u32 s6, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
@@ -1591,14 +1485,12 @@ define amdgpu_kernel void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    s_cmp_eq_u32 s6, 1
-; VI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
 ; VI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_eq_u32 s6, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
@@ -1619,20 +1511,17 @@ define amdgpu_kernel void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s13
-; SI-NEXT:    s_cmp_eq_u32 s6, 2
-; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 2
 ; SI-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s12
-; SI-NEXT:    s_cmp_eq_u32 s6, 1
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v0, 5, s[4:5]
-; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
 ; SI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_eq_u32 s6, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
-; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
@@ -1649,20 +1538,17 @@ define amdgpu_kernel void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s13
-; VI-NEXT:    s_cmp_eq_u32 s6, 2
-; VI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 2
 ; VI-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s12
-; VI-NEXT:    s_cmp_eq_u32 s6, 1
 ; VI-NEXT:    v_cndmask_b32_e64 v4, v0, 5, s[4:5]
-; VI-NEXT:    s_cselect_b64 s[4:5], 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 1
 ; VI-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_eq_u32 s6, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 5, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
-; VI-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 s[4:5], s6, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[4:5]
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 5, s[4:5]
@@ -1685,26 +1571,22 @@ define amdgpu_kernel void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s11
-; SI-NEXT:    s_cmp_eq_u32 s4, 1
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
-; SI-NEXT:    s_cmp_eq_u32 s4, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v0, s9
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v0, s8
-; SI-NEXT:    s_cmp_eq_u32 s4, 3
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 3
 ; SI-NEXT:    v_cndmask_b32_e32 v7, v5, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v5, s14
-; SI-NEXT:    s_cmp_eq_u32 s4, 2
 ; SI-NEXT:    v_cndmask_b32_e64 v6, v5, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v5, s13
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 2
 ; SI-NEXT:    v_cndmask_b32_e32 v5, v5, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, s12
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
@@ -1722,26 +1604,22 @@ define amdgpu_kernel void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)
 ; VI-NEXT:    s_mov_b32 s2, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s11
-; VI-NEXT:    s_cmp_eq_u32 s4, 1
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 1
 ; VI-NEXT:    v_cndmask_b32_e32 v3, v0, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s10
-; VI-NEXT:    s_cmp_eq_u32 s4, 0
 ; VI-NEXT:    v_cndmask_b32_e64 v2, v0, 0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v0, s9
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v1, v0, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v0, s8
-; VI-NEXT:    s_cmp_eq_u32 s4, 3
 ; VI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
 ; VI-NEXT:    v_mov_b32_e32 v5, s15
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 3
 ; VI-NEXT:    v_cndmask_b32_e32 v7, v5, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v5, s14
-; VI-NEXT:    s_cmp_eq_u32 s4, 2
 ; VI-NEXT:    v_cndmask_b32_e64 v6, v5, 0, vcc
 ; VI-NEXT:    v_mov_b32_e32 v5, s13
-; VI-NEXT:    s_cselect_b64 vcc, 1, 0
+; VI-NEXT:    v_cmp_eq_u32_e64 vcc, s4, 2
 ; VI-NEXT:    v_cndmask_b32_e32 v5, v5, v4, vcc
 ; VI-NEXT:    v_mov_b32_e32 v4, s12
 ; VI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
index 4b929882f166..63c1556212de 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fmas.ll
@@ -86,7 +86,7 @@ define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %
 }
 
 ; GCN-LABEL: {{^}}test_div_fmas_f32_cond_to_vcc:
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0{{$}}
+; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
 ; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
 define amdgpu_kernel void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) nounwind {
   %cmp = icmp eq i32 %i, 0
@@ -119,8 +119,7 @@ define amdgpu_kernel void @test_div_fmas_f32_imm_true_cond_to_vcc(float addrspac
 ; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}}
 
 ; SI-DAG: v_cmp_eq_u32_e32 [[CMP0:vcc]], 0, v{{[0-9]+}}
-; SI-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 0{{$}}
-; SI-DAG: s_cselect_b64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1, 0
+; SI-DAG: v_cmp_ne_u32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
 ; SI: s_and_b64 vcc, [[CMP0]], [[CMP1]]
 ; SI: v_div_fmas_f32 {{v[0-9]+}}, [[A]], [[B]], [[C]]
 ; SI: s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
index 7aabb8498b73..d788fda2d0e4 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.icmp.ll
@@ -298,10 +298,8 @@ define amdgpu_kernel void @v_icmp_i16_sle(i64 addrspace(1)* %out, i16 %src) {
 }
 
 ; GCN-LABEL: {{^}}v_icmp_i1_ne0:
-; GCN: s_cmp_gt_u32 s{{[0-9]+}}, 1
-; GCN: s_cselect_b64 s[[C0:\[[0-9]+:[0-9]+\]]],
-; GCN: s_cmp_gt_u32 s{{[0-9]+}}, 2
-; GCN: s_cselect_b64 s[[C1:\[[0-9]+:[0-9]+\]]],
+; GCN: v_cmp_gt_u32_e64 s[[C0:\[[0-9]+:[0-9]+\]]],
+; GCN: v_cmp_gt_u32_e64 s[[C1:\[[0-9]+:[0-9]+\]]],
 ; GCN: s_and_b64 s[[SRC:\[[0-9]+:[0-9]+\]]], s[[C0]], s[[C1]]
 ; SI-NEXT: s_mov_b32 s{{[0-9]+}}, -1
 ; GCN-NEXT: v_mov_b32_e32

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
index 821552bc87d9..f324ba98ebee 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.private.ll
@@ -29,10 +29,8 @@ define amdgpu_kernel void @is_private_vgpr(i8* addrspace(1)* %ptr.ptr) {
 ; GFX9-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x4{{$}}
 ; GFX9: s_lshl_b32 [[APERTURE]], [[APERTURE]], 16
 
-; GCN: s_cmp_eq_u32 [[PTR_HI]], [[APERTURE]]
-; GCN: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_andn2_b64 vcc, exec, [[MASK]]
-
+; GCN: v_mov_b32_e32 [[V_APERTURE:v[0-9]+]], [[APERTURE]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[PTR_HI]], [[V_APERTURE]]
 ; GCN: s_cbranch_vccnz
 define amdgpu_kernel void @is_private_sgpr(i8* %ptr) {
   %val = call i1 @llvm.amdgcn.is.private(i8* %ptr)

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
index b5e05ee47188..1371392eb0a5 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.is.shared.ll
@@ -30,9 +30,8 @@ define amdgpu_kernel void @is_local_vgpr(i8* addrspace(1)* %ptr.ptr) {
 ; CI-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x1{{$}}
 ; GFX9-DAG: s_load_dword [[PTR_HI:s[0-9]+]], s[6:7], 0x4{{$}}
 
-; GCN: s_cmp_eq_u32 [[PTR_HI]], [[APERTURE]]
-; GCN: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_andn2_b64 vcc, exec, [[MASK]]
+; GCN: v_mov_b32_e32 [[V_APERTURE:v[0-9]+]], [[APERTURE]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[PTR_HI]], [[V_APERTURE]]
 ; GCN: s_cbranch_vccnz
 define amdgpu_kernel void @is_local_sgpr(i8* %ptr) {
   %val = call i1 @llvm.amdgcn.is.shared(i8* %ptr)

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
index 2494ffb5ed5a..a0e24e0d9ee8 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.kill.ll
@@ -260,7 +260,7 @@ define amdgpu_ps void @test_non_inline_imm_sgpr(float inreg %a) #0 {
 }
 
 ; GCN-LABEL: {{^}}test_scc_liveness:
-; GCN: s_cmp
+; GCN: v_cmp
 ; GCN: s_and_b64 exec
 ; GCN: s_cmp
 ; GCN: s_cbranch_scc

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
index 5ff6f0af21cd..3bf4d3330763 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
@@ -185,10 +185,9 @@ define amdgpu_kernel void @umulo_i64_s(i64 %x, i64 %y) {
 ; GFX9-NEXT:    s_addc_u32 s5, 0, s5
 ; GFX9-NEXT:    s_add_i32 s1, s8, s7
 ; GFX9-NEXT:    s_add_i32 s1, s1, s6
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX9-NEXT:    s_mul_i32 s2, s0, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s1
-; GFX9-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GFX9-NEXT:    v_cmp_ne_u64_e64 s[0:1], s[4:5], 0
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
@@ -219,11 +218,10 @@ define amdgpu_kernel void @smulo_i64_s(i64 %x, i64 %y) {
 ; SI-NEXT:    v_mul_hi_u32 v1, s0, v1
 ; SI-NEXT:    v_mul_hi_i32 v3, s1, v3
 ; SI-NEXT:    s_mul_i32 s6, s1, s3
-; SI-NEXT:    s_cmp_lt_i32 s1, 0
-; SI-NEXT:    s_mul_i32 s1, s0, s2
+; SI-NEXT:    s_mul_i32 s8, s0, s2
 ; SI-NEXT:    v_add_i32_e32 v5, vcc, s5, v1
 ; SI-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; SI-NEXT:    v_mov_b32_e32 v6, s1
+; SI-NEXT:    v_mov_b32_e32 v6, s8
 ; SI-NEXT:    v_add_i32_e32 v5, vcc, s4, v5
 ; SI-NEXT:    v_addc_u32_e32 v2, vcc, v4, v2, vcc
 ; SI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
@@ -233,15 +231,14 @@ define amdgpu_kernel void @smulo_i64_s(i64 %x, i64 %y) {
 ; SI-NEXT:    v_add_i32_e32 v4, vcc, s4, v1
 ; SI-NEXT:    v_subrev_i32_e32 v1, vcc, s2, v2
 ; SI-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v3, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_lt_i32 s3, 0
 ; SI-NEXT:    v_ashrrev_i32_e32 v0, 31, v4
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s1, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, v0
 ; SI-NEXT:    v_subrev_i32_e32 v5, vcc, s0, v2
 ; SI-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v3, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s3, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
 ; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
 ; SI-NEXT:    v_cmp_ne_u64_e32 vcc, v[2:3], v[0:1]
@@ -271,8 +268,7 @@ define amdgpu_kernel void @smulo_i64_s(i64 %x, i64 %y) {
 ; GFX9-NEXT:    s_addc_u32 s6, 0, s6
 ; GFX9-NEXT:    s_sub_u32 s9, s4, s2
 ; GFX9-NEXT:    s_subb_u32 s10, s6, 0
-; GFX9-NEXT:    s_cmp_lt_i32 s1, 0
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
+; GFX9-NEXT:    v_cmp_lt_i32_e64 vcc, s1, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
@@ -280,11 +276,10 @@ define amdgpu_kernel void @smulo_i64_s(i64 %x, i64 %y) {
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s9
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v1, v2, vcc
 ; GFX9-NEXT:    v_subrev_co_u32_e32 v3, vcc, s0, v2
-; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v0, vcc
-; GFX9-NEXT:    s_cmp_lt_i32 s3, 0
-; GFX9-NEXT:    s_cselect_b64 vcc, 1, 0
 ; GFX9-NEXT:    s_add_i32 s1, s8, s7
+; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v0, vcc
 ; GFX9-NEXT:    s_add_i32 s1, s1, s5
+; GFX9-NEXT:    v_cmp_lt_i32_e64 vcc, s3, 0
 ; GFX9-NEXT:    s_ashr_i32 s4, s1, 31
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v0, v1, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
index 0b9752dd1093..7d0d4eee1f04 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.round.f64.ll
@@ -16,14 +16,12 @@ define amdgpu_kernel void @round_f64(double addrspace(1)* %out, double %x) #0 {
 ; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s5
 ; SI-NEXT:    s_andn2_b64 s[2:3], s[10:11], s[0:1]
 ; SI-NEXT:    s_and_b32 s0, s11, 0x80000000
-; SI-NEXT:    s_cmp_lt_i32 s5, 0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_gt_i32 s5, 51
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s5, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s5, 51
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s2
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -157,14 +155,12 @@ define amdgpu_kernel void @round_v2f64(<2 x double> addrspace(1)* %out, <2 x dou
 ; SI-NEXT:    s_brev_b32 s15, 1
 ; SI-NEXT:    s_andn2_b64 s[12:13], s[10:11], s[0:1]
 ; SI-NEXT:    s_and_b32 s0, s11, s15
-; SI-NEXT:    s_cmp_lt_i32 s14, 0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v0, s13
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    s_cmp_gt_i32 s14, 51
+; SI-NEXT:    v_mov_b32_e32 v0, s13
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s14, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s14, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s12
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -173,25 +169,23 @@ define amdgpu_kernel void @round_v2f64(<2 x double> addrspace(1)* %out, <2 x dou
 ; SI-NEXT:    v_add_f64 v[2:3], s[10:11], -v[0:1]
 ; SI-NEXT:    s_bfe_u32 s0, s9, 0xb0014
 ; SI-NEXT:    s_add_i32 s7, s0, s7
-; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
 ; SI-NEXT:    s_brev_b32 s10, -2
 ; SI-NEXT:    v_mov_b32_e32 v6, 0x3ff00000
 ; SI-NEXT:    v_mov_b32_e32 v4, s11
+; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s7
 ; SI-NEXT:    v_bfi_b32 v4, s10, v6, v4
-; SI-NEXT:    s_andn2_b64 s[2:3], s[8:9], s[0:1]
-; SI-NEXT:    s_and_b32 s0, s9, s15
 ; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v2, 0
-; SI-NEXT:    s_cmp_lt_i32 s7, 0
+; SI-NEXT:    s_andn2_b64 s[2:3], s[8:9], s[0:1]
+; SI-NEXT:    s_and_b32 s0, s9, s15
 ; SI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_gt_i32 s7, 51
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
 ; SI-NEXT:    v_mov_b32_e32 v0, s3
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s7, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s7, 51
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s2
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -256,14 +250,12 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    s_brev_b32 s20, 1
 ; SI-NEXT:    s_andn2_b64 s[16:17], s[10:11], s[0:1]
 ; SI-NEXT:    s_and_b32 s0, s11, s20
-; SI-NEXT:    s_cmp_lt_i32 s19, 0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v0, s17
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    s_cmp_gt_i32 s19, 51
+; SI-NEXT:    v_mov_b32_e32 v0, s17
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s19, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, s11
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s19, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s16
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -276,44 +268,40 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    s_brev_b32 s16, -2
 ; SI-NEXT:    v_mov_b32_e32 v12, 0x3ff00000
 ; SI-NEXT:    v_mov_b32_e32 v4, s11
-; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s17
 ; SI-NEXT:    v_bfi_b32 v4, s16, v12, v4
-; SI-NEXT:    s_andn2_b64 s[10:11], s[8:9], s[0:1]
-; SI-NEXT:    s_and_b32 s0, s9, s20
+; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s17
 ; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v2, 0
-; SI-NEXT:    s_cmp_lt_i32 s17, 0
+; SI-NEXT:    s_andn2_b64 s[10:11], s[8:9], s[0:1]
+; SI-NEXT:    s_and_b32 s0, s9, s20
 ; SI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v0, s11
 ; SI-NEXT:    v_mov_b32_e32 v1, s0
-; SI-NEXT:    s_cmp_gt_i32 s17, 51
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s17, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, s9
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s17, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v0, s10
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
+; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[0:1]
 ; SI-NEXT:    s_bfe_u32 s0, s15, 0xb0014
 ; SI-NEXT:    s_add_i32 s10, s0, s18
-; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[0:1]
-; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s10
 ; SI-NEXT:    v_mov_b32_e32 v6, s9
-; SI-NEXT:    s_andn2_b64 s[8:9], s[14:15], s[0:1]
+; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s10
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; SI-NEXT:    s_and_b32 s0, s15, s20
+; SI-NEXT:    s_andn2_b64 s[8:9], s[14:15], s[0:1]
 ; SI-NEXT:    v_bfi_b32 v6, s16, v12, v6
-; SI-NEXT:    s_cmp_lt_i32 s10, 0
+; SI-NEXT:    s_and_b32 s0, s15, s20
 ; SI-NEXT:    v_cndmask_b32_e32 v9, 0, v6, vcc
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v4, s9
 ; SI-NEXT:    v_mov_b32_e32 v5, s0
-; SI-NEXT:    s_cmp_gt_i32 s10, 51
+; SI-NEXT:    v_mov_b32_e32 v4, s9
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
 ; SI-NEXT:    v_mov_b32_e32 v5, s15
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s10, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
@@ -321,24 +309,22 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[0:1]
 ; SI-NEXT:    v_add_f64 v[6:7], s[14:15], -v[4:5]
 ; SI-NEXT:    s_bfe_u32 s0, s13, 0xb0014
-; SI-NEXT:    s_add_i32 s8, s0, s18
 ; SI-NEXT:    v_mov_b32_e32 v10, s15
-; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s8
+; SI-NEXT:    s_add_i32 s8, s0, s18
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
+; SI-NEXT:    s_lshr_b64 s[0:1], s[2:3], s8
 ; SI-NEXT:    v_bfi_b32 v10, s16, v12, v10
-; SI-NEXT:    s_andn2_b64 s[2:3], s[12:13], s[0:1]
-; SI-NEXT:    s_and_b32 s0, s13, s20
 ; SI-NEXT:    v_cndmask_b32_e32 v7, 0, v10, vcc
 ; SI-NEXT:    v_mov_b32_e32 v6, 0
-; SI-NEXT:    s_cmp_lt_i32 s8, 0
+; SI-NEXT:    s_andn2_b64 s[2:3], s[12:13], s[0:1]
+; SI-NEXT:    s_and_b32 s0, s13, s20
 ; SI-NEXT:    v_add_f64 v[6:7], v[4:5], v[6:7]
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v4, s3
 ; SI-NEXT:    v_mov_b32_e32 v5, s0
-; SI-NEXT:    s_cmp_gt_i32 s8, 51
+; SI-NEXT:    v_mov_b32_e32 v4, s3
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s8, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
 ; SI-NEXT:    v_mov_b32_e32 v5, s13
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; SI-NEXT:    v_cmp_gt_i32_e64 s[0:1], s8, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v4, s2
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
@@ -410,210 +396,194 @@ define amdgpu_kernel void @round_v4f64(<4 x double> addrspace(1)* %out, <4 x dou
 define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %in) #0 {
 ; SI-LABEL: round_v8f64:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx16 s[16:31], s[0:1], 0x19
-; SI-NEXT:    s_mov_b32 s14, -1
-; SI-NEXT:    s_movk_i32 s15, 0xfc01
-; SI-NEXT:    s_mov_b32 s13, 0xfffff
-; SI-NEXT:    s_mov_b32 s12, s14
+; SI-NEXT:    s_load_dwordx16 s[8:23], s[0:1], 0x19
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_movk_i32 s7, 0xfc01
+; SI-NEXT:    s_mov_b32 s5, 0xfffff
+; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_bfe_u32 s2, s19, 0xb0014
-; SI-NEXT:    s_add_i32 s6, s2, s15
-; SI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s6
-; SI-NEXT:    s_brev_b32 s33, 1
-; SI-NEXT:    s_andn2_b64 s[4:5], s[18:19], s[2:3]
-; SI-NEXT:    s_and_b32 s2, s19, s33
-; SI-NEXT:    s_cmp_lt_i32 s6, 0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v0, s5
+; SI-NEXT:    s_bfe_u32 s2, s11, 0xb0014
+; SI-NEXT:    s_add_i32 s26, s2, s7
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s26
+; SI-NEXT:    s_brev_b32 s27, 1
+; SI-NEXT:    s_andn2_b64 s[24:25], s[10:11], s[2:3]
+; SI-NEXT:    s_and_b32 s2, s11, s27
 ; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_cmp_gt_i32 s6, 51
+; SI-NEXT:    v_mov_b32_e32 v0, s25
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s26, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, s19
-; SI-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s11
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s26, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[2:3]
-; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v0, s24
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; SI-NEXT:    v_mov_b32_e32 v2, s18
+; SI-NEXT:    v_mov_b32_e32 v2, s10
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[2:3]
-; SI-NEXT:    v_add_f64 v[2:3], s[18:19], -v[0:1]
-; SI-NEXT:    s_bfe_u32 s2, s17, 0xb0014
-; SI-NEXT:    s_add_i32 s6, s2, s15
+; SI-NEXT:    v_add_f64 v[2:3], s[10:11], -v[0:1]
+; SI-NEXT:    s_bfe_u32 s2, s9, 0xb0014
+; SI-NEXT:    s_add_i32 s25, s2, s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[2:3]|, 0.5
-; SI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s6
-; SI-NEXT:    s_brev_b32 s34, -2
-; SI-NEXT:    v_mov_b32_e32 v14, 0x3ff00000
-; SI-NEXT:    v_mov_b32_e32 v4, s19
-; SI-NEXT:    v_bfi_b32 v4, s34, v14, v4
-; SI-NEXT:    s_andn2_b64 s[4:5], s[16:17], s[2:3]
-; SI-NEXT:    s_and_b32 s2, s17, s33
+; SI-NEXT:    s_brev_b32 s24, -2
+; SI-NEXT:    v_mov_b32_e32 v18, 0x3ff00000
+; SI-NEXT:    v_mov_b32_e32 v4, s11
+; SI-NEXT:    v_bfi_b32 v4, s24, v18, v4
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s25
 ; SI-NEXT:    v_cndmask_b32_e32 v3, 0, v4, vcc
 ; SI-NEXT:    v_mov_b32_e32 v2, 0
-; SI-NEXT:    s_cmp_lt_i32 s6, 0
+; SI-NEXT:    s_andn2_b64 s[10:11], s[8:9], s[2:3]
+; SI-NEXT:    s_and_b32 s2, s9, s27
 ; SI-NEXT:    v_add_f64 v[2:3], v[0:1], v[2:3]
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v0, s5
 ; SI-NEXT:    v_mov_b32_e32 v1, s2
-; SI-NEXT:    s_cmp_gt_i32 s6, 51
+; SI-NEXT:    v_mov_b32_e32 v0, s11
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s25, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; SI-NEXT:    v_mov_b32_e32 v1, s17
-; SI-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; SI-NEXT:    v_mov_b32_e32 v1, s9
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s25, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[2:3]
-; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v0, s10
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; SI-NEXT:    v_mov_b32_e32 v4, s16
+; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[2:3]
-; SI-NEXT:    v_add_f64 v[4:5], s[16:17], -v[0:1]
-; SI-NEXT:    s_bfe_u32 s2, s23, 0xb0014
-; SI-NEXT:    s_add_i32 s6, s2, s15
-; SI-NEXT:    v_mov_b32_e32 v6, s17
+; SI-NEXT:    v_add_f64 v[4:5], s[8:9], -v[0:1]
+; SI-NEXT:    s_bfe_u32 s2, s15, 0xb0014
+; SI-NEXT:    v_mov_b32_e32 v6, s9
+; SI-NEXT:    s_add_i32 s10, s2, s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[4:5]|, 0.5
-; SI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s6
-; SI-NEXT:    v_bfi_b32 v6, s34, v14, v6
-; SI-NEXT:    s_andn2_b64 s[4:5], s[22:23], s[2:3]
-; SI-NEXT:    s_and_b32 s2, s23, s33
+; SI-NEXT:    v_bfi_b32 v6, s24, v18, v6
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s10
 ; SI-NEXT:    v_cndmask_b32_e32 v5, 0, v6, vcc
 ; SI-NEXT:    v_mov_b32_e32 v4, 0
-; SI-NEXT:    s_cmp_lt_i32 s6, 0
+; SI-NEXT:    s_andn2_b64 s[8:9], s[14:15], s[2:3]
+; SI-NEXT:    s_and_b32 s2, s15, s27
 ; SI-NEXT:    v_add_f64 v[0:1], v[0:1], v[4:5]
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v4, s5
 ; SI-NEXT:    v_mov_b32_e32 v5, s2
-; SI-NEXT:    s_cmp_gt_i32 s6, 51
+; SI-NEXT:    v_mov_b32_e32 v4, s9
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s23
-; SI-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; SI-NEXT:    v_mov_b32_e32 v5, s15
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s10, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[2:3]
-; SI-NEXT:    v_mov_b32_e32 v4, s4
+; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
-; SI-NEXT:    v_mov_b32_e32 v6, s22
+; SI-NEXT:    v_mov_b32_e32 v6, s14
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, v6, s[2:3]
-; SI-NEXT:    v_add_f64 v[6:7], s[22:23], -v[4:5]
-; SI-NEXT:    s_bfe_u32 s2, s21, 0xb0014
-; SI-NEXT:    s_add_i32 s6, s2, s15
-; SI-NEXT:    v_mov_b32_e32 v8, s23
+; SI-NEXT:    v_add_f64 v[6:7], s[14:15], -v[4:5]
+; SI-NEXT:    s_bfe_u32 s2, s13, 0xb0014
+; SI-NEXT:    v_mov_b32_e32 v8, s15
+; SI-NEXT:    s_add_i32 s10, s2, s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[6:7]|, 0.5
-; SI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s6
-; SI-NEXT:    v_bfi_b32 v8, s34, v14, v8
-; SI-NEXT:    s_andn2_b64 s[4:5], s[20:21], s[2:3]
-; SI-NEXT:    s_and_b32 s2, s21, s33
+; SI-NEXT:    v_bfi_b32 v8, s24, v18, v8
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s10
 ; SI-NEXT:    v_cndmask_b32_e32 v7, 0, v8, vcc
 ; SI-NEXT:    v_mov_b32_e32 v6, 0
-; SI-NEXT:    s_cmp_lt_i32 s6, 0
+; SI-NEXT:    s_andn2_b64 s[8:9], s[12:13], s[2:3]
+; SI-NEXT:    s_and_b32 s2, s13, s27
 ; SI-NEXT:    v_add_f64 v[6:7], v[4:5], v[6:7]
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    v_mov_b32_e32 v4, s5
 ; SI-NEXT:    v_mov_b32_e32 v5, s2
-; SI-NEXT:    s_cmp_gt_i32 s6, 51
+; SI-NEXT:    v_mov_b32_e32 v4, s9
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
 ; SI-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; SI-NEXT:    v_mov_b32_e32 v5, s21
-; SI-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; SI-NEXT:    v_mov_b32_e32 v5, s13
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s10, 51
 ; SI-NEXT:    v_cndmask_b32_e64 v5, v4, v5, s[2:3]
-; SI-NEXT:    v_mov_b32_e32 v4, s4
+; SI-NEXT:    v_mov_b32_e32 v4, s8
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
-; SI-NEXT:    v_mov_b32_e32 v8, s20
+; SI-NEXT:    v_mov_b32_e32 v8, s12
 ; SI-NEXT:    v_cndmask_b32_e64 v4, v4, v8, s[2:3]
-; SI-NEXT:    s_bfe_u32 s2, s27, 0xb0014
-; SI-NEXT:    s_add_i32 s4, s2, s15
-; SI-NEXT:    v_add_f64 v[8:9], s[20:21], -v[4:5]
-; SI-NEXT:    s_lshr_b64 s[2:3], s[12:13], s4
-; SI-NEXT:    v_mov_b32_e32 v10, s21
+; SI-NEXT:    v_add_f64 v[8:9], s[12:13], -v[4:5]
+; SI-NEXT:    s_bfe_u32 s2, s19, 0xb0014
+; SI-NEXT:    v_mov_b32_e32 v10, s13
+; SI-NEXT:    s_add_i32 s10, s2, s7
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[8:9]|, 0.5
-; SI-NEXT:    s_andn2_b64 s[16:17], s[26:27], s[2:3]
-; SI-NEXT:    s_and_b32 s2, s27, s33
-; SI-NEXT:    v_bfi_b32 v10, s34, v14, v10
-; SI-NEXT:    s_cmp_lt_i32 s4, 0
+; SI-NEXT:    v_bfi_b32 v10, s24, v18, v10
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s10
 ; SI-NEXT:    v_cndmask_b32_e32 v9, 0, v10, vcc
 ; SI-NEXT:    v_mov_b32_e32 v8, 0
-; SI-NEXT:    s_cselect_b64 vcc, 1, 0
-; SI-NEXT:    s_cmp_gt_i32 s4, 51
+; SI-NEXT:    s_andn2_b64 s[8:9], s[18:19], s[2:3]
+; SI-NEXT:    s_and_b32 s2, s19, s27
 ; SI-NEXT:    v_add_f64 v[4:5], v[4:5], v[8:9]
 ; SI-NEXT:    v_mov_b32_e32 v9, s2
-; SI-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; SI-NEXT:    s_bfe_u32 s4, s25, 0xb0014
-; SI-NEXT:    s_add_i32 s6, s4, s15
-; SI-NEXT:    s_lshr_b64 s[4:5], s[12:13], s6
-; SI-NEXT:    s_andn2_b64 s[18:19], s[24:25], s[4:5]
-; SI-NEXT:    s_and_b32 s4, s25, s33
-; SI-NEXT:    v_mov_b32_e32 v8, s17
-; SI-NEXT:    s_cmp_lt_i32 s6, 0
-; SI-NEXT:    v_cndmask_b32_e32 v15, v8, v9, vcc
-; SI-NEXT:    v_mov_b32_e32 v9, s4
-; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; SI-NEXT:    s_cmp_gt_i32 s6, 51
-; SI-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; SI-NEXT:    s_bfe_u32 s8, s31, 0xb0014
-; SI-NEXT:    s_add_i32 s17, s8, s15
-; SI-NEXT:    s_lshr_b64 s[8:9], s[12:13], s17
-; SI-NEXT:    s_andn2_b64 s[10:11], s[30:31], s[8:9]
-; SI-NEXT:    s_and_b32 s8, s31, s33
+; SI-NEXT:    v_mov_b32_e32 v8, s9
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s10, 0
+; SI-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
+; SI-NEXT:    v_mov_b32_e32 v9, s19
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s10, 51
+; SI-NEXT:    v_cndmask_b32_e64 v13, v8, v9, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v8, s8
+; SI-NEXT:    v_cndmask_b32_e64 v8, v8, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v9, s18
+; SI-NEXT:    v_cndmask_b32_e64 v12, v8, v9, s[2:3]
+; SI-NEXT:    s_bfe_u32 s2, s17, 0xb0014
+; SI-NEXT:    s_add_i32 s12, s2, s7
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s12
+; SI-NEXT:    s_andn2_b64 s[8:9], s[16:17], s[2:3]
+; SI-NEXT:    s_bfe_u32 s2, s23, 0xb0014
+; SI-NEXT:    s_add_i32 s14, s2, s7
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s14
 ; SI-NEXT:    v_mov_b32_e32 v8, s19
-; SI-NEXT:    s_cmp_lt_i32 s17, 0
-; SI-NEXT:    v_cndmask_b32_e64 v17, v8, v9, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v9, s8
-; SI-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; SI-NEXT:    s_andn2_b64 s[10:11], s[22:23], s[2:3]
+; SI-NEXT:    s_and_b32 s2, s23, s27
+; SI-NEXT:    v_bfi_b32 v19, s24, v18, v8
+; SI-NEXT:    v_mov_b32_e32 v9, s2
 ; SI-NEXT:    v_mov_b32_e32 v8, s11
-; SI-NEXT:    s_cmp_gt_i32 s17, 51
-; SI-NEXT:    v_cndmask_b32_e64 v8, v8, v9, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v10, s10
-; SI-NEXT:    v_mov_b32_e32 v9, s31
-; SI-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v9, v8, v9, s[10:11]
-; SI-NEXT:    v_cndmask_b32_e64 v8, v10, 0, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v10, s30
-; SI-NEXT:    s_bfe_u32 s8, s29, 0xb0014
-; SI-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[10:11]
-; SI-NEXT:    s_add_i32 s10, s8, s15
-; SI-NEXT:    s_lshr_b64 s[8:9], s[12:13], s10
-; SI-NEXT:    s_andn2_b64 s[12:13], s[28:29], s[8:9]
-; SI-NEXT:    s_and_b32 s8, s29, s33
-; SI-NEXT:    s_cmp_lt_i32 s10, 0
-; SI-NEXT:    v_mov_b32_e32 v11, s8
-; SI-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; SI-NEXT:    v_mov_b32_e32 v10, s13
-; SI-NEXT:    s_cmp_gt_i32 s10, 51
-; SI-NEXT:    v_cndmask_b32_e64 v10, v10, v11, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v11, s29
-; SI-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v13, v10, v11, s[10:11]
-; SI-NEXT:    v_mov_b32_e32 v10, s12
-; SI-NEXT:    v_cndmask_b32_e64 v10, v10, 0, s[8:9]
-; SI-NEXT:    v_mov_b32_e32 v11, s28
-; SI-NEXT:    v_cndmask_b32_e64 v12, v10, v11, s[10:11]
-; SI-NEXT:    v_add_f64 v[10:11], s[28:29], -v[12:13]
-; SI-NEXT:    v_mov_b32_e32 v19, s29
-; SI-NEXT:    v_cmp_ge_f64_e64 s[8:9], |v[10:11]|, 0.5
-; SI-NEXT:    v_mov_b32_e32 v10, s31
-; SI-NEXT:    v_bfi_b32 v20, s34, v14, v10
-; SI-NEXT:    v_add_f64 v[10:11], s[30:31], -v[8:9]
-; SI-NEXT:    v_bfi_b32 v19, s34, v14, v19
-; SI-NEXT:    v_cmp_ge_f64_e64 s[10:11], |v[10:11]|, 0.5
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s14, 0
+; SI-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
+; SI-NEXT:    v_mov_b32_e32 v9, s23
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s14, 51
+; SI-NEXT:    v_cndmask_b32_e64 v9, v8, v9, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v8, s10
+; SI-NEXT:    v_cndmask_b32_e64 v8, v8, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v10, s22
+; SI-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[2:3]
+; SI-NEXT:    s_bfe_u32 s2, s21, 0xb0014
+; SI-NEXT:    s_add_i32 s7, s2, s7
+; SI-NEXT:    s_lshr_b64 s[2:3], s[4:5], s7
+; SI-NEXT:    s_andn2_b64 s[4:5], s[20:21], s[2:3]
+; SI-NEXT:    s_and_b32 s2, s21, s27
+; SI-NEXT:    v_mov_b32_e32 v11, s2
+; SI-NEXT:    v_mov_b32_e32 v10, s5
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s7, 0
+; SI-NEXT:    v_cndmask_b32_e32 v10, v10, v11, vcc
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s7, 51
+; SI-NEXT:    v_mov_b32_e32 v11, s21
+; SI-NEXT:    v_cndmask_b32_e64 v15, v10, v11, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v10, s4
+; SI-NEXT:    v_cndmask_b32_e64 v10, v10, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v11, s20
+; SI-NEXT:    v_cndmask_b32_e64 v14, v10, v11, s[2:3]
+; SI-NEXT:    v_add_f64 v[10:11], s[20:21], -v[14:15]
+; SI-NEXT:    v_mov_b32_e32 v17, s23
+; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[10:11]|, 0.5
+; SI-NEXT:    v_add_f64 v[10:11], s[22:23], -v[8:9]
+; SI-NEXT:    v_mov_b32_e32 v16, s21
+; SI-NEXT:    v_cmp_ge_f64_e64 s[2:3], |v[10:11]|, 0.5
+; SI-NEXT:    v_bfi_b32 v17, s24, v18, v17
+; SI-NEXT:    v_cndmask_b32_e64 v11, 0, v17, s[2:3]
 ; SI-NEXT:    v_mov_b32_e32 v10, 0
-; SI-NEXT:    v_cndmask_b32_e64 v11, 0, v20, s[10:11]
+; SI-NEXT:    v_bfi_b32 v16, s24, v18, v16
 ; SI-NEXT:    v_add_f64 v[10:11], v[8:9], v[10:11]
-; SI-NEXT:    v_cndmask_b32_e64 v9, 0, v19, s[8:9]
+; SI-NEXT:    v_cndmask_b32_e32 v9, 0, v16, vcc
 ; SI-NEXT:    v_mov_b32_e32 v8, 0
-; SI-NEXT:    v_add_f64 v[8:9], v[12:13], v[8:9]
-; SI-NEXT:    v_mov_b32_e32 v12, s16
-; SI-NEXT:    v_mov_b32_e32 v16, s27
-; SI-NEXT:    v_cndmask_b32_e64 v13, v15, v16, s[2:3]
-; SI-NEXT:    v_cndmask_b32_e64 v12, v12, 0, vcc
-; SI-NEXT:    v_mov_b32_e32 v15, s26
-; SI-NEXT:    v_cndmask_b32_e64 v12, v12, v15, s[2:3]
-; SI-NEXT:    v_mov_b32_e32 v15, s27
-; SI-NEXT:    v_bfi_b32 v19, s34, v14, v15
-; SI-NEXT:    v_mov_b32_e32 v15, s18
-; SI-NEXT:    v_mov_b32_e32 v18, s25
-; SI-NEXT:    v_cndmask_b32_e64 v15, v15, 0, s[4:5]
-; SI-NEXT:    v_mov_b32_e32 v16, s24
-; SI-NEXT:    v_cndmask_b32_e64 v16, v15, v16, s[6:7]
-; SI-NEXT:    v_cndmask_b32_e64 v17, v17, v18, s[6:7]
-; SI-NEXT:    v_mov_b32_e32 v15, s25
-; SI-NEXT:    v_bfi_b32 v18, s34, v14, v15
-; SI-NEXT:    v_add_f64 v[14:15], s[24:25], -v[16:17]
-; SI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
+; SI-NEXT:    s_and_b32 s13, s17, s27
+; SI-NEXT:    v_add_f64 v[8:9], v[14:15], v[8:9]
+; SI-NEXT:    v_mov_b32_e32 v14, s9
+; SI-NEXT:    v_mov_b32_e32 v15, s13
+; SI-NEXT:    v_cmp_lt_i32_e64 vcc, s12, 0
+; SI-NEXT:    v_cndmask_b32_e32 v14, v14, v15, vcc
+; SI-NEXT:    v_mov_b32_e32 v15, s17
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s12, 51
+; SI-NEXT:    v_cndmask_b32_e64 v17, v14, v15, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v14, s8
+; SI-NEXT:    v_cndmask_b32_e64 v14, v14, 0, vcc
+; SI-NEXT:    v_mov_b32_e32 v15, s16
+; SI-NEXT:    v_cndmask_b32_e64 v16, v14, v15, s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v14, s17
+; SI-NEXT:    v_bfi_b32 v18, s24, v18, v14
+; SI-NEXT:    v_add_f64 v[14:15], s[16:17], -v[16:17]
+; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; SI-NEXT:    v_cmp_ge_f64_e64 vcc, |v[14:15]|, 0.5
-; SI-NEXT:    v_add_f64 v[14:15], s[26:27], -v[12:13]
-; SI-NEXT:    s_mov_b32 s15, 0xf000
+; SI-NEXT:    v_add_f64 v[14:15], s[18:19], -v[12:13]
+; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    v_cmp_ge_f64_e64 s[0:1], |v[14:15]|, 0.5
 ; SI-NEXT:    v_mov_b32_e32 v14, 0
 ; SI-NEXT:    v_cndmask_b32_e64 v15, 0, v19, s[0:1]
@@ -622,10 +592,10 @@ define amdgpu_kernel void @round_v8f64(<8 x double> addrspace(1)* %out, <8 x dou
 ; SI-NEXT:    v_mov_b32_e32 v12, 0
 ; SI-NEXT:    v_add_f64 v[12:13], v[16:17], v[12:13]
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[12:15], 0 offset:48
-; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[12:15], 0 offset:32
-; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[12:15], 0 offset:16
-; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
+; SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[4:7], 0 offset:48
+; SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[4:7], 0 offset:32
+; SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[4:7], 0 offset:16
+; SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; CI-LABEL: round_v8f64:

diff --git a/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
index f15236baaa83..8a21f75386bc 100644
--- a/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-select-ptr.ll
@@ -8,9 +8,7 @@
 ; GCN: s_load_dwordx2
 ; GCN: s_load_dwordx2
 
-; GCN: 	s_cmp_eq_u32
-; GCN:	s_cselect_b64
-
+; GCN: v_cmp_eq_u32
 ; GCN: v_cndmask_b32
 ; GCN: v_cndmask_b32
 

diff --git a/llvm/test/CodeGen/AMDGPU/loop_break.ll b/llvm/test/CodeGen/AMDGPU/loop_break.ll
index f8e7e1a989de..4b3fb1aa0791 100644
--- a/llvm/test/CodeGen/AMDGPU/loop_break.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop_break.ll
@@ -206,33 +206,33 @@ define amdgpu_kernel void @constexpr_phi_cond_break_loop(i32 %arg) #0 {
 ; GCN:       ; %bb.0: ; %bb
 ; GCN-NEXT:    s_load_dword s3, s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b64 s[0:1], 0
-; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_mov_b32 s2, lds@abs32@lo
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s3, v0
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    ; implicit-def: $sgpr6_sgpr7
-; GCN-NEXT:    ; implicit-def: $sgpr4
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    ; implicit-def: $sgpr4_sgpr5
+; GCN-NEXT:    ; implicit-def: $sgpr3
 ; GCN-NEXT:  BB2_1: ; %bb1
 ; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT:    s_cmp_lg_u32 lds@abs32@lo, 4
-; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GCN-NEXT:    s_andn2_b64 s[6:7], s[6:7], exec
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[8:9], s2, 4
+; GCN-NEXT:    s_andn2_b64 s[4:5], s[4:5], exec
 ; GCN-NEXT:    s_and_b64 s[8:9], s[8:9], exec
-; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], s[8:9]
-; GCN-NEXT:    s_cmp_gt_i32 s4, -1
+; GCN-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
+; GCN-NEXT:    s_cmp_gt_i32 s3, -1
 ; GCN-NEXT:    s_cbranch_scc1 BB2_3
 ; GCN-NEXT:  ; %bb.2: ; %bb4
 ; GCN-NEXT:    ; in Loop: Header=BB2_1 Depth=1
-; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], 0
+; GCN-NEXT:    buffer_load_dword v1, off, s[4:7], 0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cmp_ge_i32_e32 vcc, v0, v1
-; GCN-NEXT:    s_andn2_b64 s[6:7], s[6:7], exec
+; GCN-NEXT:    s_andn2_b64 s[4:5], s[4:5], exec
 ; GCN-NEXT:    s_and_b64 s[8:9], vcc, exec
-; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], s[8:9]
+; GCN-NEXT:    s_or_b64 s[4:5], s[4:5], s[8:9]
 ; GCN-NEXT:  BB2_3: ; %Flow
 ; GCN-NEXT:    ; in Loop: Header=BB2_1 Depth=1
-; GCN-NEXT:    s_add_i32 s4, s4, 1
-; GCN-NEXT:    s_and_b64 s[8:9], exec, s[6:7]
+; GCN-NEXT:    s_add_i32 s3, s3, 1
+; GCN-NEXT:    s_and_b64 s[8:9], exec, s[4:5]
 ; GCN-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
 ; GCN-NEXT:    s_andn2_b64 exec, exec, s[0:1]
 ; GCN-NEXT:    s_cbranch_execnz BB2_1

diff --git a/llvm/test/CodeGen/AMDGPU/min.ll b/llvm/test/CodeGen/AMDGPU/min.ll
index 64cc43f46297..b2c5ee2f6ec5 100644
--- a/llvm/test/CodeGen/AMDGPU/min.ll
+++ b/llvm/test/CodeGen/AMDGPU/min.ll
@@ -384,7 +384,7 @@ define amdgpu_kernel void @s_test_umin_ult_i32(i32 addrspace(1)* %out, i32 %a, i
 
 ; FUNC-LABEL: @v_test_umin_ult_i32_multi_use
 ; SI-NOT: v_min
-; GCN: s_cmp_lt_u32
+; GCN: v_cmp_lt_u32
 ; SI-NOT: v_min
 ; SI: v_cndmask_b32
 ; SI-NOT: v_min

diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
index 2d74d6ce6e6e..fdfb9cd3ab19 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
@@ -35,10 +35,7 @@ bb4:
 }
 
 ; GCN-LABEL: {{^}}negated_cond_dominated_blocks:
-
-
-; GCN: s_cmp_lg_u32
-; GCN: s_cselect_b64 [[CC1:[^,]+]], 1, 0
+; GCN:   v_cmp_ne_u32_e64 [[CC1:[^,]+]],
 ; GCN:   s_branch [[BB1:BB[0-9]+_[0-9]+]]
 ; GCN: [[BB0:BB[0-9]+_[0-9]+]]
 ; GCN-NOT: v_cndmask_b32

diff --git a/llvm/test/CodeGen/AMDGPU/or.ll b/llvm/test/CodeGen/AMDGPU/or.ll
index a09226f24ac6..88e01c96e79c 100644
--- a/llvm/test/CodeGen/AMDGPU/or.ll
+++ b/llvm/test/CodeGen/AMDGPU/or.ll
@@ -262,7 +262,7 @@ define amdgpu_kernel void @or_i1(i32 addrspace(1)* %out, float addrspace(1)* %in
 }
 
 ; FUNC-LABEL: {{^}}s_or_i1:
-; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], vcc, s[{{[0-9]+:[0-9]+}}]
 define amdgpu_kernel void @s_or_i1(i1 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
   %cmp0 = icmp eq i32 %a, %b
   %cmp1 = icmp eq i32 %c, %d

diff --git a/llvm/test/CodeGen/AMDGPU/sad.ll b/llvm/test/CodeGen/AMDGPU/sad.ll
index 51614d712c13..3a4a2d07772c 100644
--- a/llvm/test/CodeGen/AMDGPU/sad.ll
+++ b/llvm/test/CodeGen/AMDGPU/sad.ll
@@ -133,9 +133,8 @@ define amdgpu_kernel void @v_sad_u32_multi_use_sub_pat2(i32 addrspace(1)* %out,
 }
 
 ; GCN-LABEL: {{^}}v_sad_u32_multi_use_select_pat2:
-; GCN-DAG: s_cmp_gt_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 vcc, 1, 0
 ; GCN: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
+; GCN-DAG: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
 ; GCN-DAG: s_sub_i32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_multi_use_select_pat2(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
   %icmp0 = icmp ugt i32 %a, %b
@@ -255,7 +254,7 @@ define amdgpu_kernel void @v_sad_u32_i8_pat2(i8 addrspace(1)* %out) {
 
 ; GCN-LABEL: {{^}}s_sad_u32_i8_pat2:
 ; GCN: s_load_dword
-; GCN-DAG: s_bfe_u32
+; GCN: s_bfe_u32
 ; GCN-DAG: s_sub_i32
 ; GCN-DAG: s_and_b32
 ; GCN-DAG: s_sub_i32
@@ -274,9 +273,8 @@ define amdgpu_kernel void @s_sad_u32_i8_pat2(i8 addrspace(1)* %out, i8 zeroext %
 }
 
 ; GCN-LABEL: {{^}}v_sad_u32_mismatched_operands_pat1:
+; GCN: v_cmp_le_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
 ; GCN: s_max_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cmp_le_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cselect_b64 vcc, 1, 0
 ; GCN: v_sub_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
 ; GCN: v_add_i32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}
 define amdgpu_kernel void @v_sad_u32_mismatched_operands_pat1(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {

diff --git a/llvm/test/CodeGen/AMDGPU/saddo.ll b/llvm/test/CodeGen/AMDGPU/saddo.ll
index c1260ad89c96..f0a7a8067081 100644
--- a/llvm/test/CodeGen/AMDGPU/saddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/saddo.ll
@@ -93,15 +93,14 @@ define amdgpu_kernel void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
 ; SI-NEXT:    s_mov_b32 s2, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s0, s4
-; SI-NEXT:    s_add_i32 s12, s8, s9
-; SI-NEXT:    s_cmp_lt_i32 s9, 0
-; SI-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; SI-NEXT:    s_cmp_lt_i32 s12, s8
+; SI-NEXT:    v_cmp_lt_i32_e64 s[10:11], s9, 0
+; SI-NEXT:    s_add_i32 s9, s8, s9
+; SI-NEXT:    v_mov_b32_e32 v0, s8
 ; SI-NEXT:    s_mov_b32 s1, s5
-; SI-NEXT:    v_mov_b32_e32 v0, s12
-; SI-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, s9, v0
+; SI-NEXT:    v_mov_b32_e32 v0, s9
 ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
-; SI-NEXT:    s_xor_b64 s[0:1], s[10:11], s[8:9]
+; SI-NEXT:    s_xor_b64 s[0:1], s[10:11], vcc
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s7
 ; SI-NEXT:    s_mov_b32 s6, s2
@@ -117,14 +116,13 @@ define amdgpu_kernel void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
 ; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s4
-; VI-NEXT:    s_add_i32 s4, s0, s1
-; VI-NEXT:    s_cmp_lt_i32 s1, 0
-; VI-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; VI-NEXT:    s_cmp_lt_i32 s4, s0
-; VI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; VI-NEXT:    v_cmp_lt_i32_e64 s[2:3], s1, 0
+; VI-NEXT:    s_add_i32 s1, s0, s1
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_cmp_lt_i32_e32 vcc, s1, v4
+; VI-NEXT:    v_mov_b32_e32 v4, s1
 ; VI-NEXT:    v_mov_b32_e32 v1, s5
-; VI-NEXT:    v_mov_b32_e32 v4, s4
-; VI-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; VI-NEXT:    s_xor_b64 s[0:1], s[2:3], vcc
 ; VI-NEXT:    flat_store_dword v[0:1], v4
 ; VI-NEXT:    v_mov_b32_e32 v2, s6
 ; VI-NEXT:    v_mov_b32_e32 v3, s7
@@ -138,14 +136,13 @@ define amdgpu_kernel void @s_saddo_i32(i32 addrspace(1)* %out, i1 addrspace(1)*
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s4
-; GFX9-NEXT:    s_add_i32 s4, s0, s1
-; GFX9-NEXT:    s_cmp_lt_i32 s1, 0
-; GFX9-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; GFX9-NEXT:    s_cmp_lt_i32 s4, s0
-; GFX9-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GFX9-NEXT:    v_cmp_lt_i32_e64 s[2:3], s1, 0
+; GFX9-NEXT:    s_add_i32 s1, s0, s1
+; GFX9-NEXT:    v_mov_b32_e32 v4, s0
+; GFX9-NEXT:    v_cmp_lt_i32_e32 vcc, s1, v4
+; GFX9-NEXT:    v_mov_b32_e32 v4, s1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:    v_mov_b32_e32 v4, s4
-; GFX9-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GFX9-NEXT:    s_xor_b64 s[0:1], s[2:3], vcc
 ; GFX9-NEXT:    global_store_dword v[0:1], v4, off
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s6
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s7

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index b7a18cc4f3c7..9ad18f412708 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -155,29 +155,27 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_subb_u32 s11, s7, s2
 ; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[8:9], s[0:1]
 ; GCN-IR-NEXT:    s_sub_u32 s6, s0, s8
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s6
 ; GCN-IR-NEXT:    s_subb_u32 s7, s1, s8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[10:11], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[12:13], s[0:1]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s11
-; GCN-IR-NEXT:    s_cmp_eq_u32 s11, 0
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s7
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s11
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[14:15], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
 ; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[12:13], s[0:1], -1
@@ -1009,55 +1007,51 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ;
 ; GCN-IR-LABEL: s_test_sdiv24_48:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xd
-; GCN-IR-NEXT:    s_load_dword s5, s[0:1], 0xe
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dword s0, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
-; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[2:3], 24
+; GCN-IR-NEXT:    s_sext_i32_i16 s7, s0
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
 ; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
-; GCN-IR-NEXT:    s_sext_i32_i16 s5, s5
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[4:5], 24
-; GCN-IR-NEXT:    s_ashr_i32 s4, s5, 31
-; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[2:3], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s10, s6, s2
-; GCN-IR-NEXT:    s_mov_b32 s5, s4
-; GCN-IR-NEXT:    s_subb_u32 s11, s7, s2
-; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s6, s6, s4
-; GCN-IR-NEXT:    s_subb_u32 s7, s7, s4
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s11
-; GCN-IR-NEXT:    s_cmp_eq_u32 s11, 0
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 24
+; GCN-IR-NEXT:    s_ashr_i32 s6, s7, 31
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s10, s0, s2
+; GCN-IR-NEXT:    s_mov_b32 s7, s6
+; GCN-IR-NEXT:    s_subb_u32 s11, s1, s2
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s8, s0, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, s1, s6
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s11
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[14:15], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[12:13], vcc
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[12:13], s[0:1], -1
 ; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], vcc
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b64 vcc, vcc
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
@@ -1070,10 +1064,10 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[10:11], v4
-; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
+; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, -1
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
@@ -1088,9 +1082,9 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s10, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v10, s8, v8
 ; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s7, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v11, s9, v8
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
@@ -1118,16 +1112,16 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
 ; GCN-IR-NEXT:  BB9_7: ; %Flow4
-; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[4:5], s[2:3]
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[2:3]
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
 ; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s10, -1
-; GCN-IR-NEXT:    buffer_store_short v1, off, s[8:11], 0 offset:4
-; GCN-IR-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
+; GCN-IR-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i48 %x, 24
   %2 = ashr i48 %y, 24
@@ -1269,14 +1263,13 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
 ; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[6:7]
 ; GCN-IR-NEXT:    s_sub_u32 s6, s0, s2
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
 ; GCN-IR-NEXT:    s_subb_u32 s7, s1, s2
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[8:9], 0, -1, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
index db70252a808c..e09c8177f813 100644
--- a/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-fabs-fneg-extract.ll
@@ -166,8 +166,7 @@ define amdgpu_kernel void @add_select_posk_posk_f32(i32 %c) #0 {
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 ; GCN: buffer_load_dword [[Y:v[0-9]+]]
 
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 [[VCC:.*]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[VCC:.*]], s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, |[[X]]|, [[VCC]]
 ; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
 define amdgpu_kernel void @add_select_negk_fabs_f32(i32 %c) #0 {
@@ -186,8 +185,7 @@ define amdgpu_kernel void @add_select_negk_fabs_f32(i32 %c) #0 {
 ; GCN-DAG: buffer_load_dword [[Y:v[0-9]+]]
 ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0xc4800000
 
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 [[VCC:.*]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[VCC:.*]], s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], [[K]], |[[X]]|, [[VCC]]
 ; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[Y]]
 define amdgpu_kernel void @add_select_negliteralk_fabs_f32(i32 %c) #0 {
@@ -222,8 +220,8 @@ define amdgpu_kernel void @add_select_fabs_posk_f32(i32 %c) #0 {
 ; GCN-LABEL: {{^}}add_select_posk_fabs_f32:
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 ; GCN: buffer_load_dword [[Y:v[0-9]+]]
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 vcc, 1, 0
+
+; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
 ; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[SELECT]]|, [[Y]]
 define amdgpu_kernel void @add_select_posk_fabs_f32(i32 %c) #0 {
@@ -409,8 +407,7 @@ define amdgpu_kernel void @add_select_fneg_neginv2pi_f32(i32 %c) #0 {
 ; GCN-LABEL: {{^}}add_select_negk_negk_f32:
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 
-; GCN: s_cmp_eq_u32
-; GCN: s_cselect_b64
+; GCN: v_cmp_eq_u32_e64
 ; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -1.0, -2.0, s
 ; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
 define amdgpu_kernel void @add_select_negk_negk_f32(i32 %c) #0 {
@@ -427,8 +424,7 @@ define amdgpu_kernel void @add_select_negk_negk_f32(i32 %c) #0 {
 ; GCN-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0xc5800000
 ; GCN-DAG: buffer_load_dword [[X:v[0-9]+]]
 
-; GCN: s_cmp_eq_u32
-; GCN: s_cselect_b64
+; GCN: v_cmp_eq_u32_e64
 ; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], [[K1]], [[K0]], vcc
 ; GCN: v_add_f32_e32 v{{[0-9]+}}, [[SELECT]], [[X]]
 define amdgpu_kernel void @add_select_negliteralk_negliteralk_f32(i32 %c) #0 {
@@ -459,8 +455,7 @@ define amdgpu_kernel void @add_select_fneg_negk_negk_f32(i32 %c) #0 {
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 ; GCN: buffer_load_dword [[Y:v[0-9]+]]
 
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 1.0, [[X]], vcc
 ; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
 define amdgpu_kernel void @add_select_negk_fneg_f32(i32 %c) #0 {
@@ -495,8 +490,7 @@ define amdgpu_kernel void @add_select_fneg_posk_f32(i32 %c) #0 {
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 ; GCN: buffer_load_dword [[Y:v[0-9]+]]
 
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_ne_u32_e64 vcc, s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], -1.0, [[X]], vcc
 ; GCN: v_sub_f32_e32 v{{[0-9]+}}, [[Y]], [[SELECT]]
 define amdgpu_kernel void @add_select_posk_fneg_f32(i32 %c) #0 {
@@ -638,8 +632,7 @@ define amdgpu_kernel void @add_select_negfabs_neg_f32(i32 %c) #0 {
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 ; GCN: buffer_load_dword [[Y:v[0-9]+]]
 
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 [[VCC:.*]], 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 [[VCC:.*]], s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -4.0, |[[X]]|, [[VCC]]
 ; GCN: v_mul_f32_e64 v{{[0-9]+}}, -[[SELECT]], [[Y]]
 define amdgpu_kernel void @mul_select_negfabs_posk_f32(i32 %c) #0 {
@@ -658,8 +651,7 @@ define amdgpu_kernel void @mul_select_negfabs_posk_f32(i32 %c) #0 {
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 ; GCN: buffer_load_dword [[Y:v[0-9]+]]
 
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 [[VCC:.*]], 1, 0
+; GCN-DAG: v_cmp_ne_u32_e64 [[VCC:.*]], s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e64 [[SELECT:v[0-9]+]], -4.0, |[[X]]|, [[VCC]]
 ; GCN: v_mul_f32_e64 v{{[0-9]+}}, -[[SELECT]], [[Y]]
 define amdgpu_kernel void @mul_select_posk_negfabs_f32(i32 %c) #0 {
@@ -696,8 +688,7 @@ define amdgpu_kernel void @mul_select_negfabs_negk_f32(i32 %c) #0 {
 ; GCN: buffer_load_dword [[X:v[0-9]+]]
 ; GCN: buffer_load_dword [[Y:v[0-9]+]]
 
-; GCN: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_ne_u32_e64 vcc
 ; GCN: v_cndmask_b32_e32 [[SELECT:v[0-9]+]], 4.0, [[X]], vcc
 ; GCN: v_mul_f32_e64 v{{[0-9]+}}, -|[[SELECT]]|, [[Y]]
 define amdgpu_kernel void @mul_select_negk_negfabs_f32(i32 %c) #0 {

diff --git a/llvm/test/CodeGen/AMDGPU/select-opt.ll b/llvm/test/CodeGen/AMDGPU/select-opt.ll
index 83c68f3e8c6b..24df126e4caf 100644
--- a/llvm/test/CodeGen/AMDGPU/select-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-opt.ll
@@ -5,11 +5,9 @@
 ; scalar compares, we don't want to use multiple condition registers.
 
 ; GCN-LABEL: {{^}}opt_select_i32_and_cmp_i32:
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP2:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_and_b64 vcc, [[CMP1]], [[CMP2]]
+; GCN-DAG: v_cmp_ne_u32_e32 vcc,
+; GCN-DAG: v_cmp_ne_u32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
+; GCN: s_and_b64 vcc, vcc, [[CMP1]]
 ; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
 ; GCN-NOT: [[RESULT]]
 ; GCN: buffer_store_dword [[RESULT]]
@@ -39,11 +37,9 @@ define amdgpu_kernel void @opt_select_i32_and_cmp_f32(i32 addrspace(1)* %out, fl
 }
 
 ; GCN-LABEL: {{^}}opt_select_i64_and_cmp_i32:
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP2:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_and_b64 vcc, [[CMP1]], [[CMP2]]
+; GCN-DAG: v_cmp_ne_u32_e32 vcc,
+; GCN-DAG: v_cmp_ne_u32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
+; GCN: s_and_b64 vcc, vcc, [[CMP1]]
 ; GCN: v_cndmask_b32_e32 v[[RESULT1:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
 ; GCN: v_cndmask_b32_e32 v[[RESULT0:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
 ; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT0]]:[[RESULT1]]{{\]}}
@@ -73,11 +69,9 @@ define amdgpu_kernel void @opt_select_i64_and_cmp_f32(i64 addrspace(1)* %out, fl
 }
 
 ; GCN-LABEL: {{^}}opt_select_i32_or_cmp_i32:
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP2:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_or_b64 vcc, [[CMP1]], [[CMP2]]
+; GCN-DAG: v_cmp_ne_u32_e32 vcc,
+; GCN-DAG: v_cmp_ne_u32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
+; GCN: s_or_b64 vcc, vcc, [[CMP1]]
 ; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
 ; GCN-NOT: [[RESULT]]
 ; GCN: buffer_store_dword [[RESULT]]
@@ -108,11 +102,9 @@ define amdgpu_kernel void @opt_select_i32_or_cmp_f32(i32 addrspace(1)* %out, flo
 }
 
 ; GCN-LABEL: {{^}}opt_select_i64_or_cmp_i32:
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN-DAG: s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN-DAG: s_cselect_b64 [[CMP2:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_or_b64 vcc, [[CMP1]], [[CMP2]]
+; GCN-DAG: v_cmp_ne_u32_e32 vcc,
+; GCN-DAG: v_cmp_ne_u32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]]
+; GCN: s_or_b64 vcc, vcc, [[CMP1]]
 ; GCN: v_cndmask_b32_e32 v[[RESULT1:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
 ; GCN: v_cndmask_b32_e32 v[[RESULT0:[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, vcc
 ; GCN: buffer_store_dwordx2 v{{\[}}[[RESULT0]]:[[RESULT1]]{{\]}}

diff --git a/llvm/test/CodeGen/AMDGPU/select-vectors.ll b/llvm/test/CodeGen/AMDGPU/select-vectors.ll
index 56b525ef068b..4c136d09c4ab 100644
--- a/llvm/test/CodeGen/AMDGPU/select-vectors.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-vectors.ll
@@ -180,8 +180,7 @@ define amdgpu_kernel void @s_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32
 
 ; GCN-LABEL: {{^}}v_select_v4i32:
 ; GCN: buffer_load_dwordx4
-; GCN: s_cmp_lt_u32 s{{[0-9]+}}, 32
-; GCN: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_lt_u32_e64 vcc, s{{[0-9]+}}, 32
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
@@ -219,8 +218,8 @@ define amdgpu_kernel void @select_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32>
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[AHI]]
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[BHI]]
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[ALO]]
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 0{{$}}
-; GCN-DAG: s_cselect_b64 vcc, 1, 0
+; GCN-DAG: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
+
 ; GCN-DAG: v_cndmask_b32_e32
 ; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[BLO]]
 ; GCN-DAG: v_cndmask_b32_e32
@@ -233,8 +232,7 @@ define amdgpu_kernel void @s_select_v2f32(<2 x float> addrspace(1)* %out, <2 x f
 }
 
 ; GCN-LABEL: {{^}}s_select_v3f32:
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 0{{$}}
-; GCN-DAG: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
 
 ; GCN: v_cndmask_b32_e32
 ; GCN: v_cndmask_b32_e32
@@ -251,8 +249,7 @@ define amdgpu_kernel void @s_select_v3f32(<3 x float> addrspace(1)* %out, <3 x f
 ; GCN-LABEL: {{^}}s_select_v4f32:
 ; GCN: s_load_dwordx4
 ; GCN: s_load_dwordx4
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 0{{$}}
-; GCN-DAG: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
 
 ; GCN: v_cndmask_b32_e32
 ; GCN: v_cndmask_b32_e32
@@ -269,9 +266,7 @@ define amdgpu_kernel void @s_select_v4f32(<4 x float> addrspace(1)* %out, <4 x f
 
 ; GCN-LABEL: {{^}}v_select_v4f32:
 ; GCN: buffer_load_dwordx4
-
-; GCN-DAG: s_cmp_lt_u32 s{{[0-9]+}}, 32
-; GCN-DAG: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_lt_u32_e64 vcc, s{{[0-9]+}}, 32
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
 ; GCN: v_cndmask_b32_e32 v{{[0-9]+}}, 0, v{{[0-9]+}}
@@ -287,8 +282,7 @@ bb:
 }
 
 ; GCN-LABEL: {{^}}s_select_v5f32:
-; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 0{{$}}
-; GCN-DAG: s_cselect_b64 vcc, 1, 0
+; GCN: v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0{{$}}
 
 ; GCN: v_cndmask_b32_e32
 ; GCN: v_cndmask_b32_e32

diff --git a/llvm/test/CodeGen/AMDGPU/selectcc-opt.ll b/llvm/test/CodeGen/AMDGPU/selectcc-opt.ll
index b32bbe1a6928..fe7a350a1d9f 100644
--- a/llvm/test/CodeGen/AMDGPU/selectcc-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/selectcc-opt.ll
@@ -68,7 +68,7 @@ entry:
 }
 
 ; FUNC-LABEL: {{^}}selectcc_bool:
-; SI: s_cmp_lg_u32
+; SI: v_cmp_ne_u32
 ; SI: v_cndmask_b32_e64
 ; SI-NOT: cmp
 ; SI-NOT: cndmask

diff --git a/llvm/test/CodeGen/AMDGPU/selectcc.ll b/llvm/test/CodeGen/AMDGPU/selectcc.ll
index cbd6a1759816..73e4368c52ba 100644
--- a/llvm/test/CodeGen/AMDGPU/selectcc.ll
+++ b/llvm/test/CodeGen/AMDGPU/selectcc.ll
@@ -1,6 +1,6 @@
 ; RUN: llc -verify-machineinstrs -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefixes=GCN,SI -check-prefix=FUNC %s
-; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefixes=GCN,VI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 
 ; FUNC-LABEL: {{^}}selectcc_i64:
 ; EG: XOR_INT
@@ -8,11 +8,9 @@
 ; EG: OR_INT
 ; EG: CNDE_INT
 ; EG: CNDE_INT
-; SI: v_cmp_eq_u64_e32
-; VI: s_cmp_eq_u64
-; VI: s_cselect_b64 vcc, 1, 0
-; GCN: v_cndmask
-; GCN: v_cndmask
+; SI: v_cmp_eq_u64
+; SI: v_cndmask
+; SI: v_cndmask
 define amdgpu_kernel void @selectcc_i64(i64 addrspace(1) * %out, i64 %lhs, i64 %rhs, i64 %true, i64 %false) {
 entry:
   %0 = icmp eq i64 %lhs, %rhs

diff --git a/llvm/test/CodeGen/AMDGPU/setcc-opt.ll b/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
index 4632246f8c6c..d194240ff3e6 100644
--- a/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
+++ b/llvm/test/CodeGen/AMDGPU/setcc-opt.ll
@@ -4,9 +4,8 @@
 
 ; FUNC-LABEL: {{^}}sext_bool_icmp_eq_0:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_ne_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN-NEXT: s_endpgm
 
@@ -22,9 +21,8 @@ define amdgpu_kernel void @sext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i3
 
 ; FUNC-LABEL: {{^}}sext_bool_icmp_ne_0:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_ne_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN-NEXT: s_endpgm
 
@@ -40,9 +38,8 @@ define amdgpu_kernel void @sext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i3
 
 ; FUNC-LABEL: {{^}}sext_bool_icmp_eq_neg1:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_eq_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @sext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -55,9 +52,8 @@ define amdgpu_kernel void @sext_bool_icmp_eq_neg1(i1 addrspace(1)* %out, i32 %a,
 
 ; FUNC-LABEL: {{^}}sext_bool_icmp_ne_neg1:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_eq_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @sext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -70,9 +66,8 @@ define amdgpu_kernel void @sext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a,
 
 ; FUNC-LABEL: {{^}}zext_bool_icmp_eq_0:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_ne_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @zext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -85,9 +80,8 @@ define amdgpu_kernel void @zext_bool_icmp_eq_0(i1 addrspace(1)* %out, i32 %a, i3
 
 ; FUNC-LABEL: {{^}}zext_bool_icmp_ne_0:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_lg_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_ne_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @zext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -100,9 +94,8 @@ define amdgpu_kernel void @zext_bool_icmp_ne_0(i1 addrspace(1)* %out, i32 %a, i3
 
 ; FUNC-LABEL: {{^}}zext_bool_icmp_eq_1:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_eq_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @zext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
@@ -115,9 +108,8 @@ define amdgpu_kernel void @zext_bool_icmp_eq_1(i1 addrspace(1)* %out, i32 %a, i3
 
 ; FUNC-LABEL: {{^}}zext_bool_icmp_ne_1:
 ; GCN-NOT: v_cmp
-; GCN: 	s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: 	s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN: v_cmp_eq_u32_e32 vcc,
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN-NEXT: buffer_store_byte [[RESULT]]
 define amdgpu_kernel void @zext_bool_icmp_ne_1(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
   %icmp0 = icmp ne i32 %a, %b
@@ -157,16 +149,14 @@ define amdgpu_kernel void @zext_bool_icmp_ne_neg1(i1 addrspace(1)* %out, i32 %a,
 ; SI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb
 ; VI: s_load_dword [[VALUE:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: s_movk_i32 [[K255:s[0-9]+]], 0xff
+; GCN-DAG: v_mov_b32_e32 [[VK255:v[0-9]+]], [[K255]]
 ; SI-DAG: s_and_b32 [[B:s[0-9]+]], [[VALUE]], [[K255]]
-; SI: s_cmp_lg_u32 [[B]], [[K255]]
-; SI: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
+; SI: v_cmp_ne_u32_e32 vcc, [[B]], [[VK255]]
 
-; VI: v_mov_b32_e32 [[VK255:v[0-9]+]], [[K255]]
 ; VI-DAG: v_and_b32_e32 [[B:v[0-9]+]], [[VALUE]], [[VK255]]
 ; VI: v_cmp_ne_u16_e32 vcc, [[K255]], [[B]]
 
-; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
-; VI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN: buffer_store_byte [[RESULT]]
 ; GCN: s_endpgm
 define amdgpu_kernel void @cmp_zext_k_i8max(i1 addrspace(1)* %out, i8 %b) nounwind {
@@ -210,9 +200,9 @@ define void @v_cmp_sext_k_neg1_i8_sext_arg(i8 signext %b) nounwind {
 ; VI: s_load_dword [[VAL:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c
 ; GCN: s_movk_i32 [[K:s[0-9]+]], 0xff
 ; GCN-DAG: s_and_b32 [[B:s[0-9]+]], [[VAL]], [[K]]
-; GCN: s_cmp_lg_u32 [[B]], [[K]]{{$}}
-; SI: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], [[K]]
+; GCN: v_cmp_ne_u32_e32 vcc, [[B]], [[VK]]{{$}}
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN: buffer_store_byte [[RESULT]]
 ; GCN: s_endpgm
 define amdgpu_kernel void @cmp_sext_k_neg1_i8_arg(i1 addrspace(1)* %out, i8 %b) nounwind {

diff --git a/llvm/test/CodeGen/AMDGPU/setcc.ll b/llvm/test/CodeGen/AMDGPU/setcc.ll
index a8033be5ec7c..91fec72cab51 100644
--- a/llvm/test/CodeGen/AMDGPU/setcc.ll
+++ b/llvm/test/CodeGen/AMDGPU/setcc.ll
@@ -7,12 +7,8 @@ declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[3].X, KC0[3].Z
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[2].W, KC0[3].Y
 
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cselect_b64 [[MASK1:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cselect_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, [[MASK1]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, [[MASK2]]
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e32
 define amdgpu_kernel void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
   %result = icmp eq <2 x i32> %a, %b
   %sext = sext <2 x i1> %result to <2 x i32>
@@ -26,19 +22,10 @@ define amdgpu_kernel void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cselect_b64 [[MASK1:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cselect_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cselect_b64 [[MASK3:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, s{{[0-9]+}}
-; GCN: s_cselect_b64 [[MASK4:s\[[0-9]+:[0-9]+\]]], 1, 0
-
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, [[MASK1]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, [[MASK2]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, [[MASK3]]
-; GCN-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, [[MASK4]]
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e32
 define amdgpu_kernel void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
   %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
@@ -244,7 +231,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_eq:
 ; R600: SETE_INT
-; GCN: s_cmp_eq_u32
+; GCN: v_cmp_eq_u32
 define amdgpu_kernel void @i32_eq(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp eq i32 %a, %b
@@ -255,7 +242,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ne:
 ; R600: SETNE_INT
-; GCN: s_cmp_lg_u32
+; GCN: v_cmp_ne_u32
 define amdgpu_kernel void @i32_ne(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ne i32 %a, %b
@@ -266,7 +253,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ugt:
 ; R600: SETGT_UINT
-; GCN: s_cmp_gt_u32
+; GCN: v_cmp_gt_u32
 define amdgpu_kernel void @i32_ugt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ugt i32 %a, %b
@@ -277,7 +264,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_uge:
 ; R600: SETGE_UINT
-; GCN: s_cmp_ge_u32
+; GCN: v_cmp_ge_u32
 define amdgpu_kernel void @i32_uge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp uge i32 %a, %b
@@ -288,7 +275,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ult:
 ; R600: SETGT_UINT
-; GCN: s_cmp_lt_u32
+; GCN: v_cmp_lt_u32
 define amdgpu_kernel void @i32_ult(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ult i32 %a, %b
@@ -299,7 +286,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ule:
 ; R600: SETGE_UINT
-; GCN: s_cmp_le_u32
+; GCN: v_cmp_le_u32
 define amdgpu_kernel void @i32_ule(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ule i32 %a, %b
@@ -310,7 +297,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_sgt:
 ; R600: SETGT_INT
-; GCN: s_cmp_gt_i32
+; GCN: v_cmp_gt_i32
 define amdgpu_kernel void @i32_sgt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp sgt i32 %a, %b
@@ -321,7 +308,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_sge:
 ; R600: SETGE_INT
-; GCN: s_cmp_ge_i32
+; GCN: v_cmp_ge_i32
 define amdgpu_kernel void @i32_sge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp sge i32 %a, %b
@@ -332,7 +319,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_slt:
 ; R600: SETGT_INT
-; GCN: s_cmp_lt_i32
+; GCN: v_cmp_lt_i32
 define amdgpu_kernel void @i32_slt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp slt i32 %a, %b
@@ -343,7 +330,7 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_sle:
 ; R600: SETGE_INT
-; GCN: s_cmp_le_i32
+; GCN: v_cmp_le_i32
 define amdgpu_kernel void @i32_sle(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp sle i32 %a, %b
@@ -430,8 +417,8 @@ bb2:
 }
 
 ; FUNC-LABEL: setcc_v2i32_expand
-; GCN: s_cmp_gt_i32
-; GCN: s_cmp_gt_i32
+; GCN: v_cmp_gt_i32
+; GCN: v_cmp_gt_i32
 define amdgpu_kernel void @setcc_v2i32_expand(
   <2 x i32> addrspace(1)* %a,
   <2 x i32> addrspace(1)* %b,
@@ -455,10 +442,10 @@ entry:
 }
 
 ; FUNC-LABEL: setcc_v4i32_expand
-; GCN: s_cmp_gt_i32
-; GCN: s_cmp_gt_i32
-; GCN: s_cmp_gt_i32
-; GCN: s_cmp_gt_i32
+; GCN: v_cmp_gt_i32
+; GCN: v_cmp_gt_i32
+; GCN: v_cmp_gt_i32
+; GCN: v_cmp_gt_i32
 define amdgpu_kernel void @setcc_v4i32_expand(
   <4 x i32> addrspace(1)* %a,
   <4 x i32> addrspace(1)* %b,

diff --git a/llvm/test/CodeGen/AMDGPU/setcc64.ll b/llvm/test/CodeGen/AMDGPU/setcc64.ll
index 0f27556844e4..1f1bdb055302 100644
--- a/llvm/test/CodeGen/AMDGPU/setcc64.ll
+++ b/llvm/test/CodeGen/AMDGPU/setcc64.ll
@@ -159,10 +159,7 @@ entry:
 ;;;==========================================================================;;;
 
 ; GCN-LABEL: {{^}}i64_eq:
-; SI: v_cmp_eq_u64
-; VI: s_cmp_eq_u64
-; VI: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; VI: v_cndmask_b32_e64 v{{[0-9]+}}, 0, -1, [[MASK]]
+; GCN: v_cmp_eq_u64
 define amdgpu_kernel void @i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
   %tmp0 = icmp eq i64 %a, %b
@@ -172,8 +169,7 @@ entry:
 }
 
 ; GCN-LABEL: {{^}}i64_ne:
-; SI: v_cmp_ne_u64
-; VI: s_cmp_lg_u64
+; GCN: v_cmp_ne_u64
 define amdgpu_kernel void @i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
   %tmp0 = icmp ne i64 %a, %b

diff --git a/llvm/test/CodeGen/AMDGPU/shift-i128.ll b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
index 464a6432b459..59aebaeed56e 100644
--- a/llvm/test/CodeGen/AMDGPU/shift-i128.ll
+++ b/llvm/test/CodeGen/AMDGPU/shift-i128.ll
@@ -192,16 +192,14 @@ define amdgpu_kernel void @s_shl_i128_ss(i128 %lhs, i128 %rhs) {
 ; GCN-NEXT:    s_sub_i32 s2, s8, 64
 ; GCN-NEXT:    s_lshl_b64 s[0:1], s[6:7], s8
 ; GCN-NEXT:    s_lshr_b64 s[10:11], s[4:5], s9
-; GCN-NEXT:    s_lshl_b64 s[2:3], s[4:5], s2
 ; GCN-NEXT:    s_or_b64 s[10:11], s[0:1], s[10:11]
-; GCN-NEXT:    s_cmp_lt_u32 s8, 64
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s8, 0
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[4:5], s2
 ; GCN-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-NEXT:    v_cmp_lt_u32_e64 vcc, s8, 64
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s8, 0
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, v0, v1, s[0:1]
 ; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v1, s10
@@ -233,14 +231,12 @@ define amdgpu_kernel void @s_lshr_i128_ss(i128 %lhs, i128 %rhs) {
 ; GCN-NEXT:    s_lshl_b64 s[10:11], s[6:7], s9
 ; GCN-NEXT:    s_or_b64 s[10:11], s[0:1], s[10:11]
 ; GCN-NEXT:    s_lshr_b64 s[2:3], s[6:7], s2
-; GCN-NEXT:    s_cmp_lt_u32 s8, 64
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s8, 0
 ; GCN-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-NEXT:    v_mov_b32_e32 v1, s11
+; GCN-NEXT:    v_cmp_lt_u32_e64 vcc, s8, 64
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s8, 0
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v2, s10
@@ -264,27 +260,25 @@ define amdgpu_kernel void @s_ashr_i128_ss(i128 %lhs, i128 %rhs) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx8 s[4:11], s[4:5], 0x0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s2, s7, 31
 ; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], s8
-; GCN-NEXT:    s_cmp_lt_u32 s8, 64
-; GCN-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
-; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    s_ashr_i32 s2, s7, 31
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
 ; GCN-NEXT:    s_sub_i32 s0, s8, 64
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    s_ashr_i64 s[2:3], s[6:7], s0
 ; GCN-NEXT:    s_sub_i32 s0, 64, s8
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    s_lshl_b64 s[0:1], s[6:7], s0
 ; GCN-NEXT:    s_lshr_b64 s[6:7], s[4:5], s8
+; GCN-NEXT:    v_cmp_lt_u32_e64 vcc, s8, 64
 ; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-NEXT:    s_cmp_eq_u32 s8, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v2, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s8, 0
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
 ; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v4, s6

diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
index c9ebe57eecfa..7387e98ae864 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -76,8 +76,7 @@ define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
 ; SI-NEXT:  ; %bb.1: ; %else
 ; SI-NEXT:    s_load_dword s0, s[0:1], 0x9
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_cmp_eq_u32 s0, 0
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; SI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s0, 0
 ; SI-NEXT:    s_and_b64 s[4:5], s[0:1], exec
 ; SI-NEXT:  BB1_2: ; %endif
 ; SI-NEXT:    s_or_b64 exec, exec, s[6:7]
@@ -101,8 +100,7 @@ define amdgpu_kernel void @phi_cond_outside_loop(i32 %b) {
 ; FLAT-NEXT:  ; %bb.1: ; %else
 ; FLAT-NEXT:    s_load_dword s0, s[0:1], 0x24
 ; FLAT-NEXT:    s_waitcnt lgkmcnt(0)
-; FLAT-NEXT:    s_cmp_eq_u32 s0, 0
-; FLAT-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; FLAT-NEXT:    v_cmp_eq_u32_e64 s[0:1], s0, 0
 ; FLAT-NEXT:    s_and_b64 s[4:5], s[0:1], exec
 ; FLAT-NEXT:  BB1_2: ; %endif
 ; FLAT-NEXT:    s_or_b64 exec, exec, s[6:7]
@@ -171,14 +169,11 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
 ; SI-NEXT:    s_load_dword s8, s[0:1], 0xc
 ; SI-NEXT:    s_brev_b32 s9, 44
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_cmp_lt_i32 s2, 1
-; SI-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; SI-NEXT:    s_cmp_lt_i32 s3, 4
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; SI-NEXT:    s_cmp_gt_i32 s3, 3
-; SI-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; SI-NEXT:    s_and_b64 s[2:3], s[4:5], s[2:3]
-; SI-NEXT:    s_and_b64 s[0:1], exec, s[0:1]
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], s2, 1
+; SI-NEXT:    v_cmp_lt_i32_e64 s[4:5], s3, 4
+; SI-NEXT:    v_cmp_gt_i32_e64 s[2:3], s3, 3
+; SI-NEXT:    s_and_b64 s[2:3], s[0:1], s[2:3]
+; SI-NEXT:    s_and_b64 s[0:1], exec, s[4:5]
 ; SI-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v0|, s9
@@ -187,48 +182,49 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
 ; SI-NEXT:    s_branch BB3_4
 ; SI-NEXT:  BB3_1: ; %Flow6
 ; SI-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; SI-NEXT:    s_mov_b64 s[12:13], 0
+; SI-NEXT:    s_mov_b64 s[10:11], 0
 ; SI-NEXT:  BB3_2: ; %Flow5
 ; SI-NEXT:    ; in Loop: Header=BB3_4 Depth=1
 ; SI-NEXT:    s_mov_b64 s[14:15], 0
 ; SI-NEXT:  BB3_3: ; %Flow
 ; SI-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; SI-NEXT:    s_and_b64 vcc, exec, s[10:11]
+; SI-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; SI-NEXT:    s_cbranch_vccnz BB3_8
 ; SI-NEXT:  BB3_4: ; %while.cond
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_mov_b64 s[14:15], -1
-; SI-NEXT:    s_mov_b64 s[12:13], -1
 ; SI-NEXT:    s_mov_b64 s[10:11], -1
+; SI-NEXT:    s_mov_b64 s[12:13], -1
 ; SI-NEXT:    s_mov_b64 vcc, s[0:1]
 ; SI-NEXT:    s_cbranch_vccz BB3_3
 ; SI-NEXT:  ; %bb.5: ; %convex.exit
 ; SI-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; SI-NEXT:    s_mov_b64 s[12:13], -1
 ; SI-NEXT:    s_mov_b64 s[10:11], -1
+; SI-NEXT:    s_mov_b64 s[12:13], -1
 ; SI-NEXT:    s_mov_b64 vcc, s[2:3]
 ; SI-NEXT:    s_cbranch_vccz BB3_2
 ; SI-NEXT:  ; %bb.6: ; %if.end
 ; SI-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; SI-NEXT:    s_mov_b64 s[10:11], -1
+; SI-NEXT:    s_mov_b64 s[12:13], -1
 ; SI-NEXT:    s_mov_b64 vcc, s[4:5]
 ; SI-NEXT:    s_cbranch_vccz BB3_1
 ; SI-NEXT:  ; %bb.7: ; %if.else
 ; SI-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; SI-NEXT:    s_mov_b64 s[10:11], 0
+; SI-NEXT:    s_mov_b64 s[12:13], 0
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_branch BB3_1
 ; SI-NEXT:  BB3_8: ; %loop.exit.guard4
 ; SI-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; SI-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; SI-NEXT:    s_and_b64 vcc, exec, s[10:11]
 ; SI-NEXT:    s_cbranch_vccz BB3_4
 ; SI-NEXT:  ; %bb.9: ; %loop.exit.guard
 ; SI-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; SI-NEXT:    s_cbranch_vccz BB3_13
 ; SI-NEXT:  ; %bb.10: ; %for.cond.preheader
-; SI-NEXT:    s_cmpk_lt_i32 s8, 0x3e8
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; SI-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, 0x3e8
+; SI-NEXT:    v_cmp_lt_i32_e32 vcc, s8, v0
+; SI-NEXT:    s_and_b64 vcc, exec, vcc
 ; SI-NEXT:    s_cbranch_vccz BB3_13
 ; SI-NEXT:  ; %bb.11: ; %for.body
 ; SI-NEXT:    s_and_b64 vcc, exec, 0
@@ -247,14 +243,11 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
 ; FLAT-NEXT:    s_load_dword s8, s[0:1], 0x30
 ; FLAT-NEXT:    s_brev_b32 s9, 44
 ; FLAT-NEXT:    s_waitcnt lgkmcnt(0)
-; FLAT-NEXT:    s_cmp_lt_i32 s2, 1
-; FLAT-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; FLAT-NEXT:    s_cmp_lt_i32 s3, 4
-; FLAT-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; FLAT-NEXT:    s_cmp_gt_i32 s3, 3
-; FLAT-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; FLAT-NEXT:    s_and_b64 s[2:3], s[4:5], s[2:3]
-; FLAT-NEXT:    s_and_b64 s[0:1], exec, s[0:1]
+; FLAT-NEXT:    v_cmp_lt_i32_e64 s[0:1], s2, 1
+; FLAT-NEXT:    v_cmp_lt_i32_e64 s[4:5], s3, 4
+; FLAT-NEXT:    v_cmp_gt_i32_e64 s[2:3], s3, 3
+; FLAT-NEXT:    s_and_b64 s[2:3], s[0:1], s[2:3]
+; FLAT-NEXT:    s_and_b64 s[0:1], exec, s[4:5]
 ; FLAT-NEXT:    s_and_b64 s[2:3], exec, s[2:3]
 ; FLAT-NEXT:    s_waitcnt vmcnt(0)
 ; FLAT-NEXT:    v_cmp_lt_f32_e64 s[4:5], |v0|, s9
@@ -263,48 +256,48 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
 ; FLAT-NEXT:    s_branch BB3_4
 ; FLAT-NEXT:  BB3_1: ; %Flow6
 ; FLAT-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; FLAT-NEXT:    s_mov_b64 s[12:13], 0
+; FLAT-NEXT:    s_mov_b64 s[10:11], 0
 ; FLAT-NEXT:  BB3_2: ; %Flow5
 ; FLAT-NEXT:    ; in Loop: Header=BB3_4 Depth=1
 ; FLAT-NEXT:    s_mov_b64 s[14:15], 0
 ; FLAT-NEXT:  BB3_3: ; %Flow
 ; FLAT-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; FLAT-NEXT:    s_and_b64 vcc, exec, s[10:11]
+; FLAT-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; FLAT-NEXT:    s_cbranch_vccnz BB3_8
 ; FLAT-NEXT:  BB3_4: ; %while.cond
 ; FLAT-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; FLAT-NEXT:    s_mov_b64 s[14:15], -1
-; FLAT-NEXT:    s_mov_b64 s[12:13], -1
 ; FLAT-NEXT:    s_mov_b64 s[10:11], -1
+; FLAT-NEXT:    s_mov_b64 s[12:13], -1
 ; FLAT-NEXT:    s_mov_b64 vcc, s[0:1]
 ; FLAT-NEXT:    s_cbranch_vccz BB3_3
 ; FLAT-NEXT:  ; %bb.5: ; %convex.exit
 ; FLAT-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; FLAT-NEXT:    s_mov_b64 s[12:13], -1
 ; FLAT-NEXT:    s_mov_b64 s[10:11], -1
+; FLAT-NEXT:    s_mov_b64 s[12:13], -1
 ; FLAT-NEXT:    s_mov_b64 vcc, s[2:3]
 ; FLAT-NEXT:    s_cbranch_vccz BB3_2
 ; FLAT-NEXT:  ; %bb.6: ; %if.end
 ; FLAT-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; FLAT-NEXT:    s_mov_b64 s[10:11], -1
+; FLAT-NEXT:    s_mov_b64 s[12:13], -1
 ; FLAT-NEXT:    s_mov_b64 vcc, s[4:5]
 ; FLAT-NEXT:    s_cbranch_vccz BB3_1
 ; FLAT-NEXT:  ; %bb.7: ; %if.else
 ; FLAT-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; FLAT-NEXT:    s_mov_b64 s[10:11], 0
+; FLAT-NEXT:    s_mov_b64 s[12:13], 0
 ; FLAT-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; FLAT-NEXT:    s_branch BB3_1
 ; FLAT-NEXT:  BB3_8: ; %loop.exit.guard4
 ; FLAT-NEXT:    ; in Loop: Header=BB3_4 Depth=1
-; FLAT-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; FLAT-NEXT:    s_and_b64 vcc, exec, s[10:11]
 ; FLAT-NEXT:    s_cbranch_vccz BB3_4
 ; FLAT-NEXT:  ; %bb.9: ; %loop.exit.guard
 ; FLAT-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; FLAT-NEXT:    s_cbranch_vccz BB3_13
 ; FLAT-NEXT:  ; %bb.10: ; %for.cond.preheader
-; FLAT-NEXT:    s_cmpk_lt_i32 s8, 0x3e8
-; FLAT-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; FLAT-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; FLAT-NEXT:    v_mov_b32_e32 v0, 0x3e8
+; FLAT-NEXT:    v_cmp_lt_i32_e32 vcc, s8, v0
+; FLAT-NEXT:    s_and_b64 vcc, exec, vcc
 ; FLAT-NEXT:    s_cbranch_vccz BB3_13
 ; FLAT-NEXT:  ; %bb.11: ; %for.body
 ; FLAT-NEXT:    s_and_b64 vcc, exec, 0

diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll
index 7b5a3188e794..d1dac8c8c782 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cfg-loop-assert.ll
@@ -6,10 +6,8 @@ define amdgpu_kernel void @test(i32 %arg, i32 %arg1) {
 ; CHECK:       ; %bb.0: ; %bb
 ; CHECK-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    s_cmp_eq_u32 s0, 0
-; CHECK-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; CHECK-NEXT:    s_cmp_eq_u32 s1, 0
-; CHECK-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; CHECK-NEXT:    v_cmp_eq_u32_e64 s[2:3], s0, 0
+; CHECK-NEXT:    v_cmp_eq_u32_e64 s[0:1], s1, 0
 ; CHECK-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
 ; CHECK-NEXT:    s_and_b64 vcc, exec, s[0:1]
 ; CHECK-NEXT:    s_cbranch_vccnz BB0_3

diff --git a/llvm/test/CodeGen/AMDGPU/sign_extend.ll b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
index 94bf590c72ee..1a585f8b39be 100644
--- a/llvm/test/CodeGen/AMDGPU/sign_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/sign_extend.ll
@@ -10,9 +10,9 @@ define amdgpu_kernel void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_cmp_eq_u32 s0, s1
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -23,9 +23,9 @@ define amdgpu_kernel void @s_sext_i1_to_i32(i32 addrspace(1)* %out, i32 %a, i32
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_cmp_eq_u32 s0, s1
-; VI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %cmp = icmp eq i32 %a, %b
@@ -80,9 +80,9 @@ define amdgpu_kernel void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_cmp_eq_u32 s0, s1
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; SI-NEXT:    v_mov_b32_e32 v1, v0
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -94,9 +94,9 @@ define amdgpu_kernel void @s_sext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_cmp_eq_u32 s0, s1
-; VI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v1, v0
 ; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
@@ -218,9 +218,9 @@ define amdgpu_kernel void @s_sext_i1_to_i16(i16 addrspace(1)* %out, i32 %a, i32
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_cmp_eq_u32 s0, s1
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -231,9 +231,9 @@ define amdgpu_kernel void @s_sext_i1_to_i16(i16 addrspace(1)* %out, i32 %a, i32
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_cmp_eq_u32 s0, s1
-; VI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; VI-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
   %cmp = icmp eq i32 %a, %b
@@ -254,11 +254,11 @@ define amdgpu_kernel void @s_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_cmp_eq_u32 s0, s1
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; SI-NEXT:    s_cmp_eq_u32 s2, s3
-; SI-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; SI-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; SI-NEXT:    v_mov_b32_e32 v0, s1
+; SI-NEXT:    v_mov_b32_e32 v1, s3
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; SI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, v1
+; SI-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -270,11 +270,11 @@ define amdgpu_kernel void @s_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_cmp_eq_u32 s0, s1
-; VI-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; VI-NEXT:    s_cmp_eq_u32 s2, s3
-; VI-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; VI-NEXT:    s_and_b64 s[0:1], s[0:1], s[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s1
+; VI-NEXT:    v_mov_b32_e32 v1, s3
+; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
+; VI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, v1
+; VI-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; VI-NEXT:    buffer_store_short v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
@@ -294,9 +294,9 @@ define amdgpu_kernel void @v_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_cmp_eq_u32 s1, s2
 ; SI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
-; SI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s1, v0
 ; SI-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; SI-NEXT:    buffer_store_short v0, off, s[4:7], 0
@@ -309,9 +309,9 @@ define amdgpu_kernel void @v_sext_i1_to_i16_with_and(i16 addrspace(1)* %out, i32
 ; VI-NEXT:    s_mov_b32 s7, 0xf000
 ; VI-NEXT:    s_mov_b32 s6, -1
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_cmp_eq_u32 s1, s2
 ; VI-NEXT:    v_cmp_eq_u32_e32 vcc, s0, v0
-; VI-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; VI-NEXT:    v_mov_b32_e32 v0, s2
+; VI-NEXT:    v_cmp_eq_u32_e64 s[0:1], s1, v0
 ; VI-NEXT:    s_and_b64 s[0:1], vcc, s[0:1]
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; VI-NEXT:    buffer_store_short v0, off, s[4:7], 0

diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
index 577cb7c52a7c..663831f06f4b 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.f64.ll
@@ -15,8 +15,7 @@ define amdgpu_kernel void @sint_to_fp_i32_to_f64(double addrspace(1)* %out, i32
 ; uses an SGPR (implicit vcc).
 
 ; GCN-LABEL: {{^}}sint_to_fp_i1_f64:
-; GCN-DAG: s_cmp_eq
-; GCN-DAG: s_cselect_b64
+; GCN-DAG: v_cmp_eq_u32_e64 vcc,
 ; GCN-DAG: v_cndmask_b32_e32 v[[SEL:[0-9]+]], 0, v{{[0-9]+}}
 ; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[ZERO]]:[[SEL]]{{\]}}

diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.ll
index 33c32f5a77d7..7f1f9954105b 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.ll
@@ -77,8 +77,7 @@ define amdgpu_kernel void @v_sint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4
 }
 
 ; FUNC-LABEL: {{^}}s_sint_to_fp_i1_f32:
-; SI: s_cmp_eq_u32
-; SI: s_cselect_b64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; SI: v_cmp_eq_u32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
 ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index e71b53402609..a0e16ae0cef6 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -130,27 +130,25 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
@@ -1036,29 +1034,27 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_sub_u32 s6, s6, s2
 ; GCN-IR-NEXT:    s_subb_u32 s7, s7, s2
 ; GCN-IR-NEXT:    s_sub_u32 s8, s8, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s8
 ; GCN-IR-NEXT:    s_subb_u32 s9, s9, s0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[10:11]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s8
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s9
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[10:11]
 ; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[0:1], -1
@@ -1189,71 +1185,67 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ;
 ; GCN-IR-LABEL: s_test_srem24_48:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xd
-; GCN-IR-NEXT:    s_load_dword s5, s[0:1], 0xe
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dword s0, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
-; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[2:3], 24
-; GCN-IR-NEXT:    s_sext_i32_i16 s5, s5
+; GCN-IR-NEXT:    s_sext_i32_i16 s7, s0
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
 ; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
-; GCN-IR-NEXT:    s_ashr_i32 s10, s5, 31
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[4:5], 24
+; GCN-IR-NEXT:    s_ashr_i32 s10, s7, 31
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 24
 ; GCN-IR-NEXT:    s_mov_b32 s11, s10
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[6:7], s[2:3]
-; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s4, s4, s2
-; GCN-IR-NEXT:    s_subb_u32 s5, s5, s2
-; GCN-IR-NEXT:    s_sub_u32 s6, s6, s10
-; GCN-IR-NEXT:    s_subb_u32 s7, s7, s10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[4:5], 0
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s4
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s5
-; GCN-IR-NEXT:    s_cmp_eq_u32 s5, 0
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s6, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s7, s1, s2
+; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, s9, s10
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[10:11], vcc
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[0:1], -1
 ; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], vcc
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[10:11]
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b64 vcc, vcc
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[4:5], v0
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[4:5], v4
+; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, -1
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
@@ -1268,9 +1260,9 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s10, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v10, s8, v8
 ; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s7, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v11, s9, v8
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
@@ -1285,9 +1277,9 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_cbranch_vccz BB9_3
 ; GCN-IR-NEXT:    s_branch BB9_6
 ; GCN-IR-NEXT:  BB9_4:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
 ; GCN-IR-NEXT:    s_branch BB9_7
 ; GCN-IR-NEXT:  BB9_5:
@@ -1298,24 +1290,24 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
 ; GCN-IR-NEXT:  BB9_7: ; %udiv-end
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, s6, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, s7, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, s6, v0
-; GCN-IR-NEXT:    s_mov_b32 s11, 0xf000
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s8, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s8, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s9, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s8, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
 ; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    s_mov_b32 s10, -1
-; GCN-IR-NEXT:    buffer_store_short v1, off, s[8:11], 0 offset:4
-; GCN-IR-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
+; GCN-IR-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i48 %x, 24
   %2 = ashr i48 %y, 24
@@ -1451,14 +1443,13 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_mov_b32 s1, s0
 ; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[6:7], s[0:1]
 ; GCN-IR-NEXT:    s_sub_u32 s2, s2, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_subb_u32 s3, s3, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
-; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s6
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[6:7], 0, -1, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 796d8520fab7..6b29f2962043 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -131,27 +131,25 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
@@ -705,40 +703,39 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dword s2, s[0:1], 0xd
 ; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s7, 0xff000000
-; GCN-NEXT:    s_mov_b32 s6, 0xffff
-; GCN-NEXT:    v_cvt_f32_ubyte3_e32 v2, s6
+; GCN-NEXT:    s_mov_b32 s5, 0xff000000
+; GCN-NEXT:    s_mov_b32 s4, 0xffff
+; GCN-NEXT:    v_cvt_f32_ubyte3_e32 v2, s4
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_and_b32 s2, s2, s7
-; GCN-NEXT:    s_and_b32 s3, s3, s6
+; GCN-NEXT:    s_and_b32 s2, s2, s5
+; GCN-NEXT:    s_and_b32 s3, s3, s4
 ; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    v_alignbit_b32 v0, s3, v0, 24
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s8, s[0:1], 0xb
-; GCN-NEXT:    s_load_dword s0, s[0:1], 0xc
+; GCN-NEXT:    s_load_dword s6, s[0:1], 0xb
+; GCN-NEXT:    s_load_dword s7, s[0:1], 0xc
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[2:3], 24
 ; GCN-NEXT:    v_mov_b32_e32 v9, 0
-; GCN-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-NEXT:    v_mac_f32_e32 v1, 0x4f800000, v2
 ; GCN-NEXT:    v_rcp_f32_e32 v1, v1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_and_b32 s6, s0, s6
-; GCN-NEXT:    s_and_b32 s8, s8, s7
-; GCN-NEXT:    s_lshr_b64 s[0:1], s[2:3], 24
+; GCN-NEXT:    s_and_b32 s7, s7, s4
+; GCN-NEXT:    s_and_b32 s6, s6, s5
+; GCN-NEXT:    s_sub_u32 s8, 0, s2
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x5f7ffffc, v1
 ; GCN-NEXT:    v_mul_f32_e32 v2, 0x2f800000, v1
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    s_sub_u32 s2, 0, s0
-; GCN-NEXT:    s_subb_u32 s3, 0, s1
-; GCN-NEXT:    v_mul_hi_u32 v3, s2, v1
-; GCN-NEXT:    v_mul_lo_u32 v4, s2, v2
-; GCN-NEXT:    v_mul_lo_u32 v5, s3, v1
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, s2, v1
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    s_subb_u32 s9, 0, s3
+; GCN-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-NEXT:    v_mul_lo_u32 v3, s8, v2
+; GCN-NEXT:    v_mul_hi_u32 v4, s8, v1
+; GCN-NEXT:    v_mul_lo_u32 v5, s9, v1
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_mul_lo_u32 v4, s8, v1
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; GCN-NEXT:    v_mul_lo_u32 v6, v1, v3
 ; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
@@ -753,14 +750,14 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v5, v4, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v10, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_add_i32_e64 v1, s[0:1], v1, v3
+; GCN-NEXT:    v_add_i32_e64 v1, s[2:3], v1, v3
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v5, vcc
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v2, v4, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v5, s2, v3
-; GCN-NEXT:    v_mul_hi_u32 v6, s2, v1
-; GCN-NEXT:    v_mul_lo_u32 v7, s3, v1
+; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v2, v4, s[2:3]
+; GCN-NEXT:    v_mul_lo_u32 v5, s8, v3
+; GCN-NEXT:    v_mul_hi_u32 v6, s8, v1
+; GCN-NEXT:    v_mul_lo_u32 v7, s9, v1
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, s2, v1
+; GCN-NEXT:    v_mul_lo_u32 v6, s8, v1
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
 ; GCN-NEXT:    v_mul_lo_u32 v11, v1, v5
 ; GCN-NEXT:    v_mul_hi_u32 v13, v1, v5
@@ -777,10 +774,10 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v9, v5, vcc
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v2, v5, s[0:1]
+; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v2, v5, s[2:3]
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT:    v_mov_b32_e32 v3, s8
-; GCN-NEXT:    v_alignbit_b32 v3, s6, v3, 24
+; GCN-NEXT:    v_mov_b32_e32 v3, s6
+; GCN-NEXT:    v_alignbit_b32 v3, s7, v3, 24
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v2, vcc
 ; GCN-NEXT:    v_mul_hi_u32 v5, v3, v1
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v2
@@ -794,82 +791,80 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v2, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, 0, v1
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v9, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v1
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, 2, v1
-; GCN-NEXT:    v_mul_lo_u32 v10, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v5, v0, v1
+; GCN-NEXT:    v_mul_lo_u32 v6, v0, v1
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
+; GCN-NEXT:    v_sub_i32_e32 v3, vcc, v3, v6
+; GCN-NEXT:    v_subb_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_sub_i32_e32 v5, vcc, v3, v0
+; GCN-NEXT:    v_subbrev_u32_e32 v6, vcc, 0, v4, vcc
+; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v6
+; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
+; GCN-NEXT:    v_add_i32_e32 v6, vcc, 2, v1
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v2, vcc
 ; GCN-NEXT:    v_add_i32_e32 v8, vcc, 1, v1
-; GCN-NEXT:    v_addc_u32_e32 v9, vcc, 0, v2, vcc
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, v3, v10
-; GCN-NEXT:    v_subb_u32_e32 v6, vcc, 0, v6, vcc
-; GCN-NEXT:    v_sub_i32_e32 v7, vcc, v3, v0
-; GCN-NEXT:    v_subbrev_u32_e32 v10, vcc, 0, v6, vcc
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v7, v0
 ; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], v3, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v10
-; GCN-NEXT:    v_cndmask_b32_e32 v7, -1, v7, vcc
+; GCN-NEXT:    v_addc_u32_e32 v9, vcc, 0, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v6
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, -1, v0, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v5, v8, v6, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, v4, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v9, v5, vcc
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, v5, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v9, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v1, s[0:1]
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_udiv24_i48:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xd
-; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xe
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xff000000
-; GCN-IR-NEXT:    s_mov_b32 s5, 0xffff
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dword s7, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s8, 0xffff
+; GCN-IR-NEXT:    s_mov_b32 s9, 0xff000000
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_and_b32 s3, s3, s5
-; GCN-IR-NEXT:    s_and_b32 s2, s2, s7
-; GCN-IR-NEXT:    s_and_b32 s5, s6, s5
-; GCN-IR-NEXT:    s_and_b32 s4, s4, s7
-; GCN-IR-NEXT:    s_lshr_b64 s[6:7], s[2:3], 24
-; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[4:5], 24
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
-; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s4
-; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s5
-; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_and_b32 s1, s3, s8
+; GCN-IR-NEXT:    s_and_b32 s0, s2, s9
+; GCN-IR-NEXT:    s_and_b32 s3, s7, s8
+; GCN-IR-NEXT:    s_and_b32 s2, s6, s9
+; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], 24
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
+; GCN-IR-NEXT:    s_lshr_b64 s[6:7], s[0:1], 24
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s5
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s4
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], vcc
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b64 vcc, vcc
 ; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
@@ -1060,12 +1055,11 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[2:3], 0, -1, vcc
@@ -1506,31 +1500,31 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v5, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, 24
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, 24
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 2, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, v0, 24
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GCN-NEXT:    v_sub_i32_e32 v8, vcc, s10, v8
-; GCN-NEXT:    v_mov_b32_e32 v5, s11
-; GCN-NEXT:    v_subb_u32_e32 v4, vcc, v5, v4, vcc
-; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, 24, v8
-; GCN-NEXT:    v_subbrev_u32_e32 v9, vcc, 0, v4, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, 23, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v9
-; GCN-NEXT:    v_cndmask_b32_e32 v5, -1, v5, vcc
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], 23, v8
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, -1, v5, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, 24
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, 24
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, 24
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s10, v4
+; GCN-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, 24, v4
+; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
+; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, 23, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; GCN-NEXT:    v_cndmask_b32_e32 v3, -1, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, 2, v0
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-NEXT:    v_cmp_lt_u32_e64 s[0:1], 23, v4
+; GCN-NEXT:    v_addc_u32_e32 v8, vcc, 0, v1, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v2
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v7, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@@ -1541,12 +1535,11 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 59, v2
 ; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[2:3], 0, 0, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/udivrem.ll b/llvm/test/CodeGen/AMDGPU/udivrem.ll
index a01e3ea060d3..f581c4709de7 100644
--- a/llvm/test/CodeGen/AMDGPU/udivrem.ll
+++ b/llvm/test/CodeGen/AMDGPU/udivrem.ll
@@ -42,58 +42,58 @@ define amdgpu_kernel void @test_udivrem(i32 addrspace(1)* %out0, [8 x i32], i32
 ;
 ; GFX6-LABEL: test_udivrem:
 ; GFX6:       ; %bb.0:
-; GFX6-NEXT:    s_load_dword s12, s[0:1], 0x26
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GFX6-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x13
-; GFX6-NEXT:    s_load_dword s0, s[0:1], 0x1d
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
-; GFX6-NEXT:    s_mov_b32 s10, s6
-; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s12
-; GFX6-NEXT:    s_mov_b32 s11, s7
-; GFX6-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GFX6-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v1, v0, s12
-; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s12
-; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], 0, v2
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[2:3]
-; GFX6-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[2:3]
-; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s0
-; GFX6-NEXT:    v_mul_lo_u32 v1, v0, s12
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s0, v1
-; GFX6-NEXT:    v_cmp_ge_u32_e64 s[0:1], s0, v1
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v4
-; GFX6-NEXT:    v_subrev_i32_e32 v1, vcc, s12, v4
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, s12, v4
-; GFX6-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v4, v1, vcc
-; GFX6-NEXT:    buffer_store_dword v0, off, s[4:7], 0
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v5, v1, s[0:1]
-; GFX6-NEXT:    buffer_store_dword v0, off, s[8:11], 0
-; GFX6-NEXT:    s_endpgm
+; GFX6-NEXT:	s_load_dword s12, s[0:1], 0x26
+; GFX6-NEXT:	s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GFX6-NEXT:	s_load_dwordx2 s[8:9], s[0:1], 0x13
+; GFX6-NEXT:	s_load_dword s0, s[0:1], 0x1d
+; GFX6-NEXT:	s_mov_b32 s7, 0xf000
+; GFX6-NEXT:	s_mov_b32 s6, -1
+; GFX6-NEXT:	s_mov_b32 s10, s6
+; GFX6-NEXT:	s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:	v_cvt_f32_u32_e32 v0, s12
+; GFX6-NEXT:	s_mov_b32 s11, s7
+; GFX6-NEXT:	v_rcp_iflag_f32_e32 v0, v0
+; GFX6-NEXT:	v_mul_f32_e32 v0, 0x4f800000, v0
+; GFX6-NEXT:	v_cvt_u32_f32_e32 v0, v0
+; GFX6-NEXT:	v_mul_lo_u32 v1, v0, s12
+; GFX6-NEXT:	v_mul_hi_u32 v2, v0, s12
+; GFX6-NEXT:	v_sub_i32_e32 v3, vcc, 0, v1
+; GFX6-NEXT:	v_cmp_eq_u32_e64 s[2:3], 0, v2
+; GFX6-NEXT:	v_cndmask_b32_e64 v1, v1, v3, s[2:3]
+; GFX6-NEXT:	v_mul_hi_u32 v1, v1, v0
+; GFX6-NEXT:	v_add_i32_e32 v2, vcc, v1, v0
+; GFX6-NEXT:	v_subrev_i32_e32 v0, vcc, v1, v0
+; GFX6-NEXT:	v_cndmask_b32_e64 v0, v0, v2, s[2:3]
+; GFX6-NEXT:	v_mul_hi_u32 v0, v0, s0
+; GFX6-NEXT:	v_mul_lo_u32 v1, v0, s12
+; GFX6-NEXT:	v_add_i32_e32 v2, vcc, 1, v0
+; GFX6-NEXT:	v_add_i32_e32 v3, vcc, -1, v0
+; GFX6-NEXT:	v_sub_i32_e32 v4, vcc, s0, v1
+; GFX6-NEXT:	v_cmp_ge_u32_e64 s[0:1], s0, v1
+; GFX6-NEXT:	v_cmp_le_u32_e64 s[2:3], s12, v4
+; GFX6-NEXT:	v_subrev_i32_e32 v1, vcc, s12, v4
+; GFX6-NEXT:	v_add_i32_e32 v5, vcc, s12, v4
+; GFX6-NEXT:	s_and_b64 vcc, s[2:3], s[0:1]
+; GFX6-NEXT:	v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX6-NEXT:	v_cndmask_b32_e64 v0, v3, v0, s[0:1]
+; GFX6-NEXT:	v_cndmask_b32_e32 v1, v4, v1, vcc
+; GFX6-NEXT:	buffer_store_dword v0, off, s[4:7], 0
+; GFX6-NEXT:	s_waitcnt expcnt(0)
+; GFX6-NEXT:	v_cndmask_b32_e64 v0, v5, v1, s[0:1]
+; GFX6-NEXT:	buffer_store_dword v0, off, s[8:11], 0
+; GFX6-NEXT:	s_endpgm
 ;
 ; GFX8-LABEL: test_udivrem:
 ; GFX8:       ; %bb.0:
-; GFX8-NEXT:    s_load_dword s6, s[0:1], 0x98
-; GFX8-NEXT:    s_load_dword s7, s[0:1], 0x74
+; GFX8-NEXT:    s_load_dword s7, s[0:1], 0x98
+; GFX8-NEXT:    s_load_dword s6, s[0:1], 0x74
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX8-NEXT:    v_cvt_f32_u32_e32 v0, s7
 ; GFX8-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX8-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GFX8-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX8-NEXT:    v_mul_lo_u32 v1, v0, s6
-; GFX8-NEXT:    v_mul_hi_u32 v2, v0, s6
+; GFX8-NEXT:    v_mul_lo_u32 v1, v0, s7
+; GFX8-NEXT:    v_mul_hi_u32 v2, v0, s7
 ; GFX8-NEXT:    v_sub_u32_e32 v3, vcc, 0, v1
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], 0, v2
 ; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[2:3]
@@ -101,20 +101,20 @@ define amdgpu_kernel void @test_udivrem(i32 addrspace(1)* %out0, [8 x i32], i32
 ; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v1, v0
 ; GFX8-NEXT:    v_subrev_u32_e32 v0, vcc, v1, v0
 ; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[2:3]
-; GFX8-NEXT:    v_mul_hi_u32 v2, v0, s7
+; GFX8-NEXT:    v_mul_hi_u32 v2, v0, s6
 ; GFX8-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
 ; GFX8-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x4c
-; GFX8-NEXT:    v_mul_lo_u32 v3, v2, s6
+; GFX8-NEXT:    v_mul_lo_u32 v3, v2, s7
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 1, v2
-; GFX8-NEXT:    v_sub_u32_e32 v6, vcc, s7, v3
-; GFX8-NEXT:    v_cmp_ge_u32_e64 s[0:1], s7, v3
+; GFX8-NEXT:    v_sub_u32_e32 v6, vcc, s6, v3
+; GFX8-NEXT:    v_cmp_ge_u32_e64 s[0:1], s6, v3
 ; GFX8-NEXT:    v_add_u32_e32 v5, vcc, -1, v2
-; GFX8-NEXT:    v_cmp_le_u32_e64 s[2:3], s6, v6
-; GFX8-NEXT:    v_subrev_u32_e32 v3, vcc, s6, v6
-; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s6, v6
+; GFX8-NEXT:    v_cmp_le_u32_e64 s[2:3], s7, v6
+; GFX8-NEXT:    v_subrev_u32_e32 v3, vcc, s7, v6
+; GFX8-NEXT:    v_add_u32_e32 v7, vcc, s7, v6
 ; GFX8-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]

diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
index b34c82b0de7e..75d6eb57cb0b 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.f64.ll
@@ -75,8 +75,7 @@ define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)
 ; uses an SGPR (implicit vcc).
 
 ; GCN-LABEL: {{^}}uint_to_fp_i1_to_f64:
-; GCN-DAG: s_cmp_eq_u32
-; GCN-DAG: s_cselect_b64 vcc
+; GCN-DAG: v_cmp_eq_u32_e64 vcc
 ; GCN-DAG: v_cndmask_b32_e32 v[[SEL:[0-9]+]], 0, v{{[0-9]+}}
 ; GCN-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
 ; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[ZERO]]:[[SEL]]{{\]}}

diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
index 2cd8761e21eb..1beddba4d22e 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.ll
@@ -77,8 +77,7 @@ define amdgpu_kernel void @v_uint_to_fp_v4i32(<4 x float> addrspace(1)* %out, <4
 }
 
 ; FUNC-LABEL: {{^}}s_uint_to_fp_i1_to_f32:
-; SI: s_cmp_eq
-; SI: s_cselect_b64 [[CMP:s\[[0-9]+:[0-9]\]]],
+; SI: v_cmp_eq_u32_e64 [[CMP:s\[[0-9]+:[0-9]\]]],
 ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1.0, [[CMP]]
 ; SI: buffer_store_dword [[RESULT]],
 ; SI: s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
index 5ed532d9cebd..b5e0ed3d61c0 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -251,9 +251,7 @@ ENDIF:                                            ; preds = %IF, %main_body
 ; GCN: s_load_dwordx2 s{{\[}}[[COND0:[0-9]+]]:[[COND1:[0-9]+]]{{\]}}
 ; GCN: s_cmp_lt_i32 s[[COND0]], 1
 ; GCN: s_cbranch_scc1 [[EXIT:[A-Za-z0-9_]+]]
-; GCN: s_cmp_gt_i32 s[[COND1]], 0{{$}}
-; GCN: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: s_and_b64 vcc, exec, [[MASK]]
+; GCN: v_cmp_gt_i32_e64 {{[^,]*}}, s[[COND1]], 0{{$}}
 ; GCN: s_cbranch_vccz [[BODY:[A-Za-z0-9_]+]]
 ; GCN: {{^}}[[EXIT]]:
 ; GCN: s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index a231c781b90c..0aac641dae97 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -130,27 +130,25 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_cmp_eq_u32 s3, 0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
@@ -864,12 +862,11 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[2:3], 0, -1, vcc
@@ -1066,12 +1063,11 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
-; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
-; GCN-IR-NEXT:    s_cmp_eq_u32 s7, 0
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
-; GCN-IR-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 59, v2
 ; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[2:3], 0, 0, vcc

diff --git a/llvm/test/CodeGen/AMDGPU/v_cndmask.ll b/llvm/test/CodeGen/AMDGPU/v_cndmask.ll
index 7191f66c8da4..486886f2d164 100644
--- a/llvm/test/CodeGen/AMDGPU/v_cndmask.ll
+++ b/llvm/test/CodeGen/AMDGPU/v_cndmask.ll
@@ -8,8 +8,7 @@ declare float @llvm.fabs.f32(float)
 declare double @llvm.fabs.f64(double)
 
 ; GCN-LABEL: {{^}}v_cnd_nan_nosgpr:
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, 0
-; GCN: s_cselect_b64 [[COND:vcc|s\[[0-9]+:[0-9]+\]]], 1, 0
+; GCN: v_cmp_eq_u32_e64 [[COND:vcc|s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0
 ; GCN: v_cndmask_b32_e{{32|64}} v{{[0-9]}}, -1, v{{[0-9]+}}, [[COND]]
 ; GCN-DAG: v{{[0-9]}}
 ; All nan values are converted to 0xffffffff
@@ -31,11 +30,9 @@ define amdgpu_kernel void @v_cnd_nan_nosgpr(float addrspace(1)* %out, i32 %c, fl
 ; However on GFX10 constant bus is limited to 2 scalar operands, not one.
 
 ; GCN-LABEL: {{^}}v_cnd_nan:
-; SIVI:  s_cmp_eq_u32 s{{[0-9]+}}, 0
-; SIVI:  s_cselect_b64 vcc, 1, 0
+; SIVI:  v_cmp_eq_u32_e64 vcc, s{{[0-9]+}}, 0
 ; SIVI:  v_cndmask_b32_e32 v{{[0-9]+}}, -1, v{{[0-9]+}}, vcc
-; GFX10: s_cmp_eq_u32 s{{[0-9]+}}, 0
-; GFX10: s_cselect_b64 [[CC:s\[[0-9]+:[0-9]+\]]], 1, 0
+; GFX10: v_cmp_eq_u32_e64 [[CC:s\[[0-9:]+\]]], s{{[0-9]+}}, 0
 ; GFX10: v_cndmask_b32_e64 v{{[0-9]+}}, -1, s{{[0-9]+}}, [[CC]]
 ; GCN-DAG: v{{[0-9]}}
 ; All nan values are converted to 0xffffffff

diff --git a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
index 212775ae2a5e..28fb550c44af 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-alloca-bitcast.ll
@@ -13,15 +13,12 @@ target datalayout = "A5"
 ; GCN-ALLOCA-COUNT-4: buffer_store_dword
 ; GCN-ALLOCA:         buffer_load_dword
 
-; GCN-PROMOTE-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 1
-; GCN-PROMOTE-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
-; GCN-PROMOTE-DAG: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
-; GCN-PROMOTE-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 2
-; GCN-PROMOTE-DAG: s_cselect_b64 [[CC2:[^,]+]], 1, 0
-; GCN-PROMOTE-DAG: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], [[CC2]]
-; GCN-PROMOTE-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 3
-; GCN-PROMOTE-DAG: s_cselect_b64 [[CC3:[^,]+]], 1, 0
-; GCN-PROMOTE-DAG: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], [[CC3]]
+; GCN-PROMOTE: v_cmp_eq_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 1
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
+; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC2:[^,]+]], s{{[0-9]+}}, 2
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], [[CC2]]
+; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC3:[^,]+]], s{{[0-9]+}}, 3
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], [[CC3]]
 
 ; GCN-PROMOTE: ScratchSize: 0
 
@@ -323,15 +320,12 @@ entry:
 ; GCN-ALLOCA-COUNT-4: buffer_store_dword
 ; GCN-ALLOCA:         buffer_load_dword
 
-; GCN-PROMOTE-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 1
-; GCN-PROMOTE-DAG: s_cselect_b64 [[CC1:[^,]+]], 1, 0
-; GCN-PROMOTE-DAG: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
-; GCN-PROMOTE-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 2
-; GCN-PROMOTE-DAG: s_cselect_b64 [[CC2:[^,]+]], 1, 0
-; GCN-PROMOTE-DAG: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], [[CC2]]
-; GCN-PROMOTE-DAG: s_cmp_lg_u32 s{{[0-9]+}}, 3
-; GCN-PROMOTE-DAG: s_cselect_b64 [[CC3:[^,]+]], 1, 0
-; GCN-PROMOTE-DAG: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], [[CC3]]
+; GCN-PROMOTE: v_cmp_eq_u32_e64 [[CC1:[^,]+]], s{{[0-9]+}}, 1
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
+; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC2:[^,]+]], s{{[0-9]+}}, 2
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], [[CC2]]
+; GCN-PROMOTE: v_cmp_ne_u32_e64 [[CC3:[^,]+]], s{{[0-9]+}}, 3
+; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], [[CC3]]
 
 ; GCN-PROMOTE: ScratchSize: 0
 

diff --git a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
index 3c6f491ea737..d87e4990b255 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
+++ b/llvm/test/CodeGen/AMDGPU/vector-extract-insert.ll
@@ -34,42 +34,35 @@ define amdgpu_kernel void @extract_insert_same_dynelt_v4i32(i32 addrspace(1)* %o
 define amdgpu_kernel void @extract_insert_different_dynelt_v4i32(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %val, i32 %idx0, i32 %idx1) #1 {
 ; GCN-LABEL: extract_insert_different_dynelt_v4i32:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
 ; GCN-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b64 s[0:1], s[10:11]
+; GCN-NEXT:    s_mov_b64 s[0:1], s[6:7]
 ; GCN-NEXT:    v_lshlrev_b32_e32 v1, 4, v0
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 2, v0
 ; GCN-NEXT:    v_mov_b32_e32 v5, v2
 ; GCN-NEXT:    buffer_load_dwordx4 v[0:3], v[1:2], s[0:3], 0 addr64
-; GCN-NEXT:    s_cmp_eq_u32 s5, 3
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s5, 2
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s5, 1
-; GCN-NEXT:    s_mov_b64 s[10:11], s[2:3]
-; GCN-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s5, 0
-; GCN-NEXT:    v_mov_b32_e32 v6, s4
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s6, 1
+; GCN-NEXT:    v_mov_b32_e32 v6, s8
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 3
+; GCN-NEXT:    s_mov_b64 s[6:7], s[2:3]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v6, s[2:3]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v6, s[4:5]
-; GCN-NEXT:    s_cmp_eq_u32 s6, 2
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 2
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 1
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v6, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s10, 1
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s[0:1]
-; GCN-NEXT:    s_cmp_eq_u32 s6, 3
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s10, 2
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    s_cselect_b64 vcc, 1, 0
+; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s10, 3
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    buffer_store_dword v0, v[4:5], s[8:11], 0 addr64
+; GCN-NEXT:    buffer_store_dword v0, v[4:5], s[4:7], 0 addr64
 ; GCN-NEXT:    s_endpgm
   %id = call i32 @llvm.amdgcn.workitem.id.x()
   %id.ext = sext i32 %id to i64

diff --git a/llvm/test/CodeGen/AMDGPU/vselect.ll b/llvm/test/CodeGen/AMDGPU/vselect.ll
index d6a515baaa5d..02ffd30be5fd 100644
--- a/llvm/test/CodeGen/AMDGPU/vselect.ll
+++ b/llvm/test/CodeGen/AMDGPU/vselect.ll
@@ -7,12 +7,10 @@
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Z
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Y
 
-; SI: s_cmp_gt_i32
-; SI: s_cselect_b64 vcc, 1, 0
-; SI: s_cmp_gt_i32
-; SI: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; SI-DAG: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
-; SI-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[MASK]]
+; SI: v_cmp_gt_i32_e32 vcc
+; SI: v_cndmask_b32_e32
+; SI: v_cmp_gt_i32_e32 vcc
+; SI: v_cndmask_b32_e32
 
 define amdgpu_kernel void @test_select_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in0, <2 x i32> addrspace(1)* %in1, <2 x i32> %val) {
 entry:
@@ -52,9 +50,9 @@ entry:
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Z
 ; EG-DAG: CNDE_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], T[0-9]+\.[XYZW]}}, KC0[3].Y
 
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
-; SI: v_cndmask_b32_e64
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
+; SI: v_cndmask_b32_e32
 ; SI: v_cndmask_b32_e32
 
 define amdgpu_kernel void @test_select_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in0, <4 x i32> addrspace(1)* %in1, <4 x i32> %val) {

diff --git a/llvm/test/CodeGen/AMDGPU/zero_extend.ll b/llvm/test/CodeGen/AMDGPU/zero_extend.ll
index 2b5150d4ed80..d5d954eaca74 100644
--- a/llvm/test/CodeGen/AMDGPU/zero_extend.ll
+++ b/llvm/test/CodeGen/AMDGPU/zero_extend.ll
@@ -37,9 +37,8 @@ define amdgpu_kernel void @s_arg_zext_i1_to_i64(i64 addrspace(1)* %out, i1 zeroe
 
 ; GCN-LABEL: {{^}}s_cmp_zext_i1_to_i64:
 ; GCN-DAG: s_mov_b32 s{{[0-9]+}}, 0
-; GCN-DAG: s_cmp_eq_u32
-; GCN:     s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN:     v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[MASK]]
+; GCN-DAG: v_cmp_eq_u32
+; GCN:     v_cndmask_b32
 define amdgpu_kernel void @s_cmp_zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
   %cmp = icmp eq i32 %a, %b
   %ext = zext i1 %cmp to i64
@@ -55,11 +54,10 @@ define amdgpu_kernel void @s_cmp_zext_i1_to_i64(i64 addrspace(1)* %out, i32 %a,
 ; GCN: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
 ; GCN-DAG: s_and_b32 [[MASK_A:s[0-9]+]], [[A]], [[MASK]]
 ; GCN-DAG: s_and_b32 [[MASK_B:s[0-9]+]], [[B]], [[MASK]]
+; GCN: v_mov_b32_e32 [[V_B:v[0-9]+]], [[B]]
+; GCN: v_cmp_eq_u32_e32 vcc, [[MASK_A]], [[V_B]]
 
-; GCN: s_cmp_eq_u32 s{{[0-9]+}}, [[B]]
-; GCN: s_cselect_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], 1, 0
-; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[MASK]]
-
+; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc
 ; GCN: buffer_store_short [[RESULT]]
 define amdgpu_kernel void @s_cmp_zext_i1_to_i16(i16 addrspace(1)* %out, [8 x i32], i16 zeroext %a, [8 x i32], i16 zeroext %b) #0 {
   %tmp0 = icmp eq i16 %a, %b

