[llvm] 34d9a16 - AMDGPU: Add option to expand 64-bit integer division in IR

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 14 11:16:22 PST 2020


Author: Matt Arsenault
Date: 2020-02-14T11:16:08-08:00
New Revision: 34d9a16e54becbea4c790b19804d35453018a53f

URL: https://github.com/llvm/llvm-project/commit/34d9a16e54becbea4c790b19804d35453018a53f
DIFF: https://github.com/llvm/llvm-project/commit/34d9a16e54becbea4c790b19804d35453018a53f.diff

LOG: AMDGPU: Add option to expand 64-bit integer division in IR

I didn't realize we were already expanding 24/32-bit division here.
Use the available IntegerDivision utilities. The expansion uses loops,
so it produces significantly smaller code than the inline DAG
expansion.
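
For intuition, the loop expansion is essentially restoring
(shift/subtract) long division. A minimal C++ sketch of the idea, not
the exact IR that expandDivisionUpTo64Bits emits (which also handles
signed inputs and has extra special cases):

    #include <cstdint>

    // One quotient bit per iteration; as with udiv, dividing by zero is
    // undefined behavior.
    uint64_t udiv64(uint64_t N, uint64_t D) {
      uint64_t Q = 0, R = 0;
      for (int I = 63; I >= 0; --I) {
        R = (R << 1) | ((N >> I) & 1); // bring down the next dividend bit
        if (R >= D) {                  // the divisor fits; subtract it
          R -= D;
          Q |= UINT64_C(1) << I;
        }
      }
      return Q; // the remainder ends up in R
    }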

This now requires width reductions of 64-bit divisions before
introducing the expanded loops.
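
The reduction itself is straightforward once ComputeNumSignBits proves
both operands fit in 32 bits: truncate, divide narrow, and extend the
result back out. A hedged IRBuilder sketch of the unsigned shape (the
helper name is illustrative; the actual shrinkDivRem64 feeds the
narrowed values into the existing 24/32-bit expansions rather than
emitting a plain udiv):

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Narrow an i64 udiv whose operands are known to fit in 32 bits.
    static Value *narrowUDiv64(IRBuilder<> &B, Value *Num, Value *Den) {
      Value *LoNum = B.CreateTrunc(Num, B.getInt32Ty());
      Value *LoDen = B.CreateTrunc(Den, B.getInt32Ty());
      Value *Quot = B.CreateUDiv(LoNum, LoDen);  // 32-bit divide
      return B.CreateZExt(Quot, Num->getType()); // sext in the signed case
    }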

This helps work around missing legalization in GlobalISel for the
division instructions, which are the only remaining core instructions
that didn't work at all.

I think this is plausibly a better implementation than the one in the
DAG, although turning it on by default would miss out on the constant
value optimizations and also needs benchmarking.
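
For now the expansion is opt-in via the new hidden flag, e.g.
(mirroring the RUN line added to the llc tests):

    llc -march=amdgcn -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < input.ll

where input.ll stands in for the test file.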

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index 8840b0a180c0..a63da457ff71 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -39,6 +39,7 @@
 #include "llvm/IR/Operator.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/Value.h"
+#include "llvm/Transforms/Utils/IntegerDivision.h"
 #include "llvm/InitializePasses.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/Casting.h"
@@ -63,6 +64,12 @@ static cl::opt<bool> UseMul24Intrin(
   cl::ReallyHidden,
   cl::init(true));
 
+static cl::opt<bool> ExpandDiv64InIR(
+  "amdgpu-codegenprepare-expand-div64",
+  cl::desc("Expand 64-bit division in AMDGPUCodeGenPrepare"),
+  cl::ReallyHidden,
+  cl::init(false));
+
 class AMDGPUCodeGenPrepare : public FunctionPass,
                              public InstVisitor<AMDGPUCodeGenPrepare, bool> {
   const GCNSubtarget *ST = nullptr;
@@ -160,16 +167,27 @@ class AMDGPUCodeGenPrepare : public FunctionPass,
 
   bool divHasSpecialOptimization(BinaryOperator &I,
                                  Value *Num, Value *Den) const;
+  int getDivNumBits(BinaryOperator &I,
+                    Value *Num, Value *Den,
+                    unsigned AtLeast, bool Signed) const;
 
   /// Expands 24 bit div or rem.
   Value* expandDivRem24(IRBuilder<> &Builder, BinaryOperator &I,
                         Value *Num, Value *Den,
                         bool IsDiv, bool IsSigned) const;
 
+  Value *expandDivRem24Impl(IRBuilder<> &Builder, BinaryOperator &I,
+                            Value *Num, Value *Den, unsigned NumBits,
+                            bool IsDiv, bool IsSigned) const;
+
   /// Expands 32 bit div or rem.
   Value* expandDivRem32(IRBuilder<> &Builder, BinaryOperator &I,
                         Value *Num, Value *Den) const;
 
+  Value *shrinkDivRem64(IRBuilder<> &Builder, BinaryOperator &I,
+                        Value *Num, Value *Den) const;
+  void expandDivRem64(BinaryOperator &I) const;
+
   /// Widen a scalar load.
   ///
   /// \details \p Widen scalar load for uniform, small type loads from constant
@@ -806,30 +824,49 @@ static Value* getMulHu(IRBuilder<> &Builder, Value *LHS, Value *RHS) {
   return getMul64(Builder, LHS, RHS).second;
 }
 
-// The fractional part of a float is enough to accurately represent up to
-// a 24-bit signed integer.
-Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
-                                            BinaryOperator &I,
-                                            Value *Num, Value *Den,
-                                            bool IsDiv, bool IsSigned) const {
-  assert(Num->getType()->isIntegerTy(32));
-
+/// Figure out how many bits are really needed for this division. \p AtLeast is
+/// an optimization hint to bypass the second ComputeNumSignBits call if the
+/// first one is insufficient. Returns -1 on failure.
+int AMDGPUCodeGenPrepare::getDivNumBits(BinaryOperator &I,
+                                        Value *Num, Value *Den,
+                                        unsigned AtLeast, bool IsSigned) const {
   const DataLayout &DL = Mod->getDataLayout();
   unsigned LHSSignBits = ComputeNumSignBits(Num, DL, 0, AC, &I);
-  if (LHSSignBits < 9)
-    return nullptr;
+  if (LHSSignBits < AtLeast)
+    return -1;
 
   unsigned RHSSignBits = ComputeNumSignBits(Den, DL, 0, AC, &I);
-  if (RHSSignBits < 9)
-    return nullptr;
-
+  if (RHSSignBits < AtLeast)
+    return -1;
 
   unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
-  unsigned DivBits = 32 - SignBits;
+  unsigned DivBits = Num->getType()->getScalarSizeInBits() - SignBits;
   if (IsSigned)
     ++DivBits;
+  return DivBits;
+}
+
+// The fractional part of a float is enough to accurately represent up to
+// a 24-bit signed integer.
+Value *AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
+                                            BinaryOperator &I,
+                                            Value *Num, Value *Den,
+                                            bool IsDiv, bool IsSigned) const {
+  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
+  if (DivBits == -1)
+    return nullptr;
+  return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
+}
 
+Value *AMDGPUCodeGenPrepare::expandDivRem24Impl(IRBuilder<> &Builder,
+                                                BinaryOperator &I,
+                                                Value *Num, Value *Den,
+                                                unsigned DivBits,
+                                                bool IsDiv, bool IsSigned) const {
   Type *I32Ty = Builder.getInt32Ty();
+  Num = Builder.CreateTrunc(Num, I32Ty);
+  Den = Builder.CreateTrunc(Den, I32Ty);
+
   Type *F32Ty = Builder.getFloatTy();
   ConstantInt *One = Builder.getInt32(1);
   Value *JQ = One;
@@ -901,13 +938,18 @@ Value* AMDGPUCodeGenPrepare::expandDivRem24(IRBuilder<> &Builder,
     Res = Builder.CreateSub(Num, Rem);
   }
 
-  // Extend in register from the number of bits this divide really is.
-  if (IsSigned) {
-    Res = Builder.CreateShl(Res, 32 - DivBits);
-    Res = Builder.CreateAShr(Res, 32 - DivBits);
-  } else {
-    ConstantInt *TruncMask = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
-    Res = Builder.CreateAnd(Res, TruncMask);
+  if (DivBits != 0 && DivBits < 32) {
+    // Extend in register from the number of bits this divide really is.
+    if (IsSigned) {
+      int InRegBits = 32 - DivBits;
+
+      Res = Builder.CreateShl(Res, InRegBits);
+      Res = Builder.CreateAShr(Res, InRegBits);
+    } else {
+      ConstantInt *TruncMask
+        = Builder.getInt32((UINT64_C(1) << DivBits) - 1);
+      Res = Builder.CreateAnd(Res, TruncMask);
+    }
   }
 
   return Res;
@@ -981,8 +1023,8 @@ Value* AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
   }
 
   if (Value *Res = expandDivRem24(Builder, I, Num, Den, IsDiv, IsSigned)) {
-    Res = Builder.CreateTrunc(Res, Ty);
-    return Res;
+    return IsSigned ? Builder.CreateSExtOrTrunc(Res, Ty) :
+                      Builder.CreateZExtOrTrunc(Res, Ty);
   }
 
   ConstantInt *Zero = Builder.getInt32(0);
@@ -1093,6 +1135,53 @@ Value* AMDGPUCodeGenPrepare::expandDivRem32(IRBuilder<> &Builder,
   return Res;
 }
 
+Value *AMDGPUCodeGenPrepare::shrinkDivRem64(IRBuilder<> &Builder,
+                                            BinaryOperator &I,
+                                            Value *Num, Value *Den) const {
+  if (!ExpandDiv64InIR && divHasSpecialOptimization(I, Num, Den))
+    return nullptr;  // Keep it for later optimization.
+
+  Instruction::BinaryOps Opc = I.getOpcode();
+
+  bool IsDiv = Opc == Instruction::SDiv || Opc == Instruction::UDiv;
+  bool IsSigned = Opc == Instruction::SDiv || Opc == Instruction::SRem;
+
+  int NumDivBits = getDivNumBits(I, Num, Den, 32, IsSigned);
+  if (NumDivBits == -1)
+    return nullptr;
+
+  Value *Narrowed = nullptr;
+  if (NumDivBits <= 24) {
+    Narrowed = expandDivRem24Impl(Builder, I, Num, Den, NumDivBits,
+                                  IsDiv, IsSigned);
+  } else if (NumDivBits <= 32) {
+    Narrowed = expandDivRem32(Builder, I, Num, Den);
+  }
+
+  if (Narrowed) {
+    return IsSigned ? Builder.CreateSExt(Narrowed, Num->getType()) :
+                      Builder.CreateZExt(Narrowed, Num->getType());
+  }
+
+  return nullptr;
+}
+
+void AMDGPUCodeGenPrepare::expandDivRem64(BinaryOperator &I) const {
+  Instruction::BinaryOps Opc = I.getOpcode();
+  // Do the general expansion.
+  if (Opc == Instruction::UDiv || Opc == Instruction::SDiv) {
+    expandDivisionUpTo64Bits(&I);
+    return;
+  }
+
+  if (Opc == Instruction::URem || Opc == Instruction::SRem) {
+    expandRemainderUpTo64Bits(&I);
+    return;
+  }
+
+  llvm_unreachable("not a division");
+}
+
 bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
   if (foldBinOpIntoSelect(I))
     return true;
@@ -1108,9 +1197,13 @@ bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
   Instruction::BinaryOps Opc = I.getOpcode();
   Type *Ty = I.getType();
   Value *NewDiv = nullptr;
+  unsigned ScalarSize = Ty->getScalarSizeInBits();
+
+  SmallVector<BinaryOperator *, 8> Div64ToExpand;
+
   if ((Opc == Instruction::URem || Opc == Instruction::UDiv ||
        Opc == Instruction::SRem || Opc == Instruction::SDiv) &&
-      Ty->getScalarSizeInBits() <= 32) {
+      ScalarSize <= 64) {
     Value *Num = I.getOperand(0);
     Value *Den = I.getOperand(1);
     IRBuilder<> Builder(&I);
@@ -1122,13 +1215,35 @@ bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
       for (unsigned N = 0, E = VT->getNumElements(); N != E; ++N) {
         Value *NumEltN = Builder.CreateExtractElement(Num, N);
         Value *DenEltN = Builder.CreateExtractElement(Den, N);
-        Value *NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
-        if (!NewElt)
-          NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
+
+        Value *NewElt;
+        if (ScalarSize <= 32) {
+          NewElt = expandDivRem32(Builder, I, NumEltN, DenEltN);
+          if (!NewElt)
+            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
+        } else {
+          // See if this 64-bit division can be shrunk to 32/24-bits before
+          // producing the general expansion.
+          NewElt = shrinkDivRem64(Builder, I, NumEltN, DenEltN);
+          if (!NewElt) {
+            // The general 64-bit expansion introduces control flow and doesn't
+            // return the new value. Just insert a scalar copy and defer
+            // expanding it.
+            NewElt = Builder.CreateBinOp(Opc, NumEltN, DenEltN);
+            Div64ToExpand.push_back(cast<BinaryOperator>(NewElt));
+          }
+        }
+
         NewDiv = Builder.CreateInsertElement(NewDiv, NewElt, N);
       }
     } else {
-      NewDiv = expandDivRem32(Builder, I, Num, Den);
+      if (ScalarSize <= 32)
+        NewDiv = expandDivRem32(Builder, I, Num, Den);
+      else {
+        NewDiv = shrinkDivRem64(Builder, I, Num, Den);
+        if (!NewDiv)
+          Div64ToExpand.push_back(&I);
+      }
     }
 
     if (NewDiv) {
@@ -1138,6 +1253,14 @@ bool AMDGPUCodeGenPrepare::visitBinaryOperator(BinaryOperator &I) {
     }
   }
 
+  if (ExpandDiv64InIR) {
+    // TODO: We get much worse code in specially handled constant cases.
+    for (BinaryOperator *Div : Div64ToExpand) {
+      expandDivRem64(*Div);
+      Changed = true;
+    }
+  }
+
   return Changed;
 }
 
@@ -1255,11 +1378,25 @@ bool AMDGPUCodeGenPrepare::runOnFunction(Function &F) {
 
   bool MadeChange = false;
 
-  for (BasicBlock &BB : F) {
+  Function::iterator NextBB;
+  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; FI = NextBB) {
+    BasicBlock *BB = &*FI;
+    NextBB = std::next(FI);
+
     BasicBlock::iterator Next;
-    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; I = Next) {
+    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; I = Next) {
       Next = std::next(I);
+
       MadeChange |= visit(*I);
+
+      if (Next != E) { // Control flow changed
+        BasicBlock *NextInstBB = Next->getParent();
+        if (NextInstBB != BB) {
+          BB = NextInstBB;
+          E = BB->end();
+          FE = F.end();
+        }
+      }
     }
   }
 

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index daf56e41522a..0f4c09433c1c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -5505,8 +5505,13 @@ define amdgpu_kernel void @udiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 
 define amdgpu_kernel void @udiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
 ; CHECK-LABEL: @udiv_v2i64_pow2k_denom(
-; CHECK-NEXT:    [[R:%.*]] = udiv <2 x i64> [[X:%.*]], <i64 4096, i64 4096>
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = udiv i64 [[TMP1]], 4096
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = udiv i64 [[TMP4]], 4096
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i64_pow2k_denom:
@@ -5516,8 +5521,8 @@ define amdgpu_kernel void @udiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshr_b64 s[2:3], s[2:3], 12
 ; GCN-NEXT:    s_lshr_b64 s[0:1], s[0:1], 12
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[2:3], 12
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -5531,8 +5536,13 @@ define amdgpu_kernel void @udiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 
 define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
 ; CHECK-LABEL: @udiv_v2i64_mixed_pow2k_denom(
-; CHECK-NEXT:    [[R:%.*]] = udiv <2 x i64> [[X:%.*]], <i64 4096, i64 4095>
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = udiv i64 [[TMP1]], 4096
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = udiv i64 [[TMP4]], 4095
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i64_mixed_pow2k_denom:
@@ -5540,7 +5550,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0x4f800000
 ; GCN-NEXT:    v_madak_f32 v0, 0, v0, 0x457ff000
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_movk_i32 s4, 0xf001
+; GCN-NEXT:    s_movk_i32 s6, 0xf001
 ; GCN-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -5549,11 +5559,13 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
+; GCN-NEXT:    s_movk_i32 s0, 0xfff
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s6
+; GCN-NEXT:    v_mul_lo_u32 v5, v1, s6
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, s6
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s4
-; GCN-NEXT:    v_mul_lo_u32 v5, v1, s4
-; GCN-NEXT:    v_mul_lo_u32 v4, v0, s4
 ; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, v0, v3
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
 ; GCN-NEXT:    v_mul_hi_u32 v6, v0, v4
@@ -5571,19 +5583,17 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-NEXT:    v_add_i32_e64 v0, s[2:3], v0, v3
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, s4
+; GCN-NEXT:    v_mul_hi_u32 v5, v0, s6
 ; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v1, v4, s[2:3]
-; GCN-NEXT:    v_mul_lo_u32 v6, v3, s4
-; GCN-NEXT:    v_mul_lo_u32 v8, v0, s4
+; GCN-NEXT:    v_mul_lo_u32 v6, v3, s6
+; GCN-NEXT:    v_mul_lo_u32 v8, v0, s6
 ; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, v0, v5
-; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; GCN-NEXT:    v_mul_lo_u32 v6, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v9, v0, v8
 ; GCN-NEXT:    v_mul_hi_u32 v10, v0, v5
 ; GCN-NEXT:    v_mul_hi_u32 v11, v3, v5
-; GCN-NEXT:    s_movk_i32 s0, 0xfff
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
 ; GCN-NEXT:    v_addc_u32_e32 v9, vcc, v7, v10, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v10, v3, v8
@@ -5608,6 +5618,7 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v5, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[8:9], 12
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v4, v0, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v6, v2, vcc
@@ -5641,9 +5652,8 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, v1, v3, s[0:1]
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v7, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
-; GCN-NEXT:    s_lshr_b64 s[0:1], s[8:9], 12
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = udiv <2 x i64> %x, <i64 4096, i64 4095>
@@ -5654,8 +5664,15 @@ define amdgpu_kernel void @udiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 define amdgpu_kernel void @udiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @udiv_v2i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = udiv <2 x i64> [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = udiv i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = udiv i64 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: udiv_v2i64_pow2_shl_denom:
@@ -5666,10 +5683,10 @@ define amdgpu_kernel void @udiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_add_i32 s1, s2, 12
 ; GCN-NEXT:    s_add_i32 s0, s0, 12
-; GCN-NEXT:    s_lshr_b64 s[2:3], s[10:11], s1
+; GCN-NEXT:    s_add_i32 s2, s2, 12
 ; GCN-NEXT:    s_lshr_b64 s[0:1], s[8:9], s0
+; GCN-NEXT:    s_lshr_b64 s[2:3], s[10:11], s2
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -5874,8 +5891,13 @@ define amdgpu_kernel void @urem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 
 define amdgpu_kernel void @urem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
 ; CHECK-LABEL: @urem_v2i64_pow2k_denom(
-; CHECK-NEXT:    [[R:%.*]] = urem <2 x i64> [[X:%.*]], <i64 4096, i64 4096>
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = urem i64 [[TMP1]], 4096
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = urem i64 [[TMP4]], 4096
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v2i64_pow2k_denom:
@@ -5887,8 +5909,8 @@ define amdgpu_kernel void @urem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_and_b32 s1, s2, s8
 ; GCN-NEXT:    s_and_b32 s0, s0, s8
+; GCN-NEXT:    s_and_b32 s1, s2, s8
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v2, s1
 ; GCN-NEXT:    v_mov_b32_e32 v3, v1
@@ -5902,8 +5924,15 @@ define amdgpu_kernel void @urem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 define amdgpu_kernel void @urem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @urem_v2i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = urem <2 x i64> [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = urem i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = urem i64 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: urem_v2i64_pow2_shl_denom:
@@ -5916,14 +5945,14 @@ define amdgpu_kernel void @urem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshl_b64 s[0:1], s[12:13], s0
 ; GCN-NEXT:    s_lshl_b64 s[2:3], s[12:13], s2
-; GCN-NEXT:    s_add_u32 s2, s2, -1
-; GCN-NEXT:    s_addc_u32 s3, s3, -1
-; GCN-NEXT:    s_and_b64 s[2:3], s[10:11], s[2:3]
+; GCN-NEXT:    s_lshl_b64 s[0:1], s[12:13], s0
 ; GCN-NEXT:    s_add_u32 s0, s0, -1
 ; GCN-NEXT:    s_addc_u32 s1, s1, -1
 ; GCN-NEXT:    s_and_b64 s[0:1], s[8:9], s[0:1]
+; GCN-NEXT:    s_add_u32 s2, s2, -1
+; GCN-NEXT:    s_addc_u32 s3, s3, -1
+; GCN-NEXT:    s_and_b64 s[2:3], s[10:11], s[2:3]
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -6249,8 +6278,13 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 
 define amdgpu_kernel void @sdiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
 ; CHECK-LABEL: @sdiv_v2i64_pow2k_denom(
-; CHECK-NEXT:    [[R:%.*]] = sdiv <2 x i64> [[X:%.*]], <i64 4096, i64 4096>
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = sdiv i64 [[TMP1]], 4096
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i64 [[TMP4]], 4096
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v2i64_pow2k_denom:
@@ -6260,16 +6294,16 @@ define amdgpu_kernel void @sdiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s8, s3, 31
-; GCN-NEXT:    s_lshr_b32 s8, s8, 20
-; GCN-NEXT:    s_add_u32 s2, s2, s8
-; GCN-NEXT:    s_addc_u32 s3, s3, 0
 ; GCN-NEXT:    s_ashr_i32 s8, s1, 31
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 12
 ; GCN-NEXT:    s_lshr_b32 s8, s8, 20
 ; GCN-NEXT:    s_add_u32 s0, s0, s8
 ; GCN-NEXT:    s_addc_u32 s1, s1, 0
+; GCN-NEXT:    s_ashr_i32 s8, s3, 31
 ; GCN-NEXT:    s_ashr_i64 s[0:1], s[0:1], 12
+; GCN-NEXT:    s_lshr_b32 s8, s8, 20
+; GCN-NEXT:    s_add_u32 s2, s2, s8
+; GCN-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 12
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -6283,101 +6317,112 @@ define amdgpu_kernel void @sdiv_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 
 define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
 ; CHECK-LABEL: @ssdiv_v2i64_mixed_pow2k_denom(
-; CHECK-NEXT:    [[R:%.*]] = sdiv <2 x i64> [[X:%.*]], <i64 4096, i64 4095>
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = sdiv i64 [[TMP1]], 4096
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = sdiv i64 [[TMP4]], 4095
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: ssdiv_v2i64_mixed_pow2k_denom:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    v_mov_b32_e32 v0, 0x4f800000
-; GCN-NEXT:    v_madak_f32 v0, 0, v0, 0x457ff000
+; GCN-NEXT:    v_mov_b32_e32 v0, 0x457ff000
+; GCN-NEXT:    v_mov_b32_e32 v1, 0x4f800000
+; GCN-NEXT:    v_mac_f32_e32 v0, 0, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    s_movk_i32 s6, 0xf001
-; GCN-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    v_mul_hi_u32 v3, s6, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, s6
-; GCN-NEXT:    v_mul_lo_u32 v4, v0, s6
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_ashr_i32 s0, s9, 31
+; GCN-NEXT:    s_lshr_b32 s0, s0, 20
+; GCN-NEXT:    v_mul_hi_u32 v2, s6, v0
+; GCN-NEXT:    v_mul_lo_u32 v3, v1, s6
+; GCN-NEXT:    s_add_u32 s2, s8, s0
+; GCN-NEXT:    s_addc_u32 s3, s9, 0
+; GCN-NEXT:    s_ashr_i32 s8, s11, 31
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; GCN-NEXT:    v_mul_lo_u32 v3, v0, s6
 ; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v9, v1, v2
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v6, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v5, v0, v3
+; GCN-NEXT:    v_mul_hi_u32 v7, v1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v7, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v8, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v8, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v4, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v9, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_add_i32_e64 v0, s[2:3], v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v4, vcc
-; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[2:3]
-; GCN-NEXT:    v_mul_lo_u32 v4, v2, s6
-; GCN-NEXT:    v_mul_hi_u32 v6, s6, v0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 12
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
+; GCN-NEXT:    v_mul_lo_u32 v6, v1, v3
+; GCN-NEXT:    v_mul_hi_u32 v3, v1, v3
+; GCN-NEXT:    s_mov_b32 s9, s8
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, s6
-; GCN-NEXT:    v_subrev_i32_e32 v4, vcc, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v10, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v12, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v11, v0, v6
-; GCN-NEXT:    v_mul_hi_u32 v9, v2, v6
-; GCN-NEXT:    v_mul_lo_u32 v6, v2, v6
-; GCN-NEXT:    v_mul_hi_u32 v8, v2, v4
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v3, vcc
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v2
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v5, vcc
+; GCN-NEXT:    v_addc_u32_e64 v2, vcc, v1, v3, s[0:1]
+; GCN-NEXT:    v_mul_lo_u32 v5, v2, s6
+; GCN-NEXT:    v_mul_hi_u32 v7, s6, v0
+; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; GCN-NEXT:    v_mul_lo_u32 v7, v0, s6
+; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, v0, v5
+; GCN-NEXT:    v_mul_lo_u32 v10, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v12, v0, v5
+; GCN-NEXT:    v_mul_hi_u32 v11, v0, v7
+; GCN-NEXT:    v_mul_hi_u32 v9, v2, v7
+; GCN-NEXT:    v_mul_lo_u32 v7, v2, v7
+; GCN-NEXT:    v_mul_hi_u32 v8, v2, v5
 ; GCN-NEXT:    v_add_i32_e32 v10, vcc, v11, v10
-; GCN-NEXT:    v_addc_u32_e32 v11, vcc, v7, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, v4
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v6, v10
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v8, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v6, v2
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v7, v4, vcc
+; GCN-NEXT:    v_addc_u32_e32 v11, vcc, 0, v12, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, v5
+; GCN-NEXT:    v_add_i32_e32 v7, vcc, v7, v10
+; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v8, v4, vcc
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
+; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v6, v5, vcc
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v4, s[2:3]
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s2, s11, 31
-; GCN-NEXT:    s_add_u32 s0, s10, s2
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[0:1]
+; GCN-NEXT:    s_add_u32 s0, s10, s8
+; GCN-NEXT:    s_addc_u32 s1, s11, s8
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    s_mov_b32 s3, s2
-; GCN-NEXT:    s_addc_u32 s1, s11, s2
-; GCN-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-NEXT:    s_xor_b64 s[0:1], s[0:1], s[8:9]
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v2, s0, v1
 ; GCN-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s0, v1
-; GCN-NEXT:    v_mul_hi_u32 v6, s1, v1
+; GCN-NEXT:    v_mul_hi_u32 v5, s0, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s1, v1
 ; GCN-NEXT:    v_mul_lo_u32 v1, s1, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s1, v0
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v5, s1, v0
 ; GCN-NEXT:    v_mul_hi_u32 v0, s1, v0
-; GCN-NEXT:    s_movk_i32 s3, 0xfff
+; GCN-NEXT:    s_movk_i32 s9, 0xfff
 ; GCN-NEXT:    s_mov_b32 s6, -1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v6, v5, vcc
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v7, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, s3
-; GCN-NEXT:    v_mul_hi_u32 v3, s3, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, v0, s3
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v6, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v2, v1, s9
+; GCN-NEXT:    v_mul_hi_u32 v3, s9, v0
+; GCN-NEXT:    v_mul_lo_u32 v4, v0, s9
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s0, v4
 ; GCN-NEXT:    v_mov_b32_e32 v3, s1
 ; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v3, v2, vcc
-; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s3, v4
+; GCN-NEXT:    v_subrev_i32_e32 v3, vcc, s9, v4
 ; GCN-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v2, vcc
 ; GCN-NEXT:    s_movk_i32 s0, 0xffe
 ; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s0, v3
@@ -6394,22 +6439,17 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, -1, v4, s[0:1]
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v7, v5, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    s_ashr_i32 s0, s9, 31
-; GCN-NEXT:    s_lshr_b32 s0, s0, 20
-; GCN-NEXT:    s_add_u32 s0, s8, s0
-; GCN-NEXT:    s_addc_u32 s1, s9, 0
-; GCN-NEXT:    v_xor_b32_e32 v0, s2, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s2, v1
-; GCN-NEXT:    v_mov_b32_e32 v3, s2
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s2, v0
-; GCN-NEXT:    s_ashr_i64 s[0:1], s[0:1], 12
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v8, v6, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
+; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
+; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s8, v0
+; GCN-NEXT:    v_xor_b32_e32 v1, s8, v1
+; GCN-NEXT:    v_mov_b32_e32 v3, s8
 ; GCN-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %r = sdiv <2 x i64> %x, <i64 4096, i64 4095>
@@ -6420,8 +6460,15 @@ define amdgpu_kernel void @ssdiv_v2i64_mixed_pow2k_denom(<2 x i64> addrspace(1)*
 define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @sdiv_v2i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = sdiv <2 x i64> [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = sdiv i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = sdiv i64 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: sdiv_v2i64_pow2_shl_denom:
@@ -6432,8 +6479,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    s_mov_b32 s18, 0x4f800000
 ; GCN-NEXT:    s_mov_b32 s19, 0x5f7ffffc
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshl_b64 s[12:13], s[2:3], s4
-; GCN-NEXT:    s_lshl_b64 s[2:3], s[2:3], s6
+; GCN-NEXT:    s_lshl_b64 s[12:13], s[2:3], s6
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
 ; GCN-NEXT:    s_ashr_i32 s16, s3, 31
 ; GCN-NEXT:    s_add_u32 s2, s2, s16
 ; GCN-NEXT:    s_mov_b32 s17, s16
@@ -6503,22 +6550,22 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[2:3]
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s2, s11, 31
-; GCN-NEXT:    s_add_u32 s0, s10, s2
+; GCN-NEXT:    s_ashr_i32 s2, s9, 31
+; GCN-NEXT:    s_add_u32 s0, s8, s2
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    s_mov_b32 s3, s2
-; GCN-NEXT:    s_addc_u32 s1, s11, s2
-; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[2:3]
+; GCN-NEXT:    s_addc_u32 s1, s9, s2
+; GCN-NEXT:    s_xor_b64 s[8:9], s[0:1], s[2:3]
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v2, s8, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s8, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s8, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s9, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s9, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    v_mul_lo_u32 v5, s9, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s9, v0
 ; GCN-NEXT:    s_xor_b64 s[2:3], s[2:3], s[16:17]
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
@@ -6533,8 +6580,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_mul_lo_u32 v3, s14, v0
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s11, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s10, v3
+; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s9, v2
+; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s8, v3
 ; GCN-NEXT:    v_subb_u32_e64 v5, s[0:1], v5, v7, vcc
 ; GCN-NEXT:    v_subrev_i32_e64 v7, s[0:1], s14, v3
 ; GCN-NEXT:    v_subbrev_u32_e64 v5, s[0:1], 0, v5, s[0:1]
@@ -6548,14 +6595,14 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
 ; GCN-NEXT:    v_add_i32_e64 v9, s[0:1], 1, v0
 ; GCN-NEXT:    v_addc_u32_e64 v10, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    s_ashr_i32 s10, s13, 31
+; GCN-NEXT:    s_ashr_i32 s8, s13, 31
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
-; GCN-NEXT:    s_add_u32 s12, s12, s10
+; GCN-NEXT:    s_add_u32 s12, s12, s8
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, v10, v8, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v8, s11
-; GCN-NEXT:    s_mov_b32 s11, s10
-; GCN-NEXT:    s_addc_u32 s13, s13, s10
-; GCN-NEXT:    s_xor_b64 s[12:13], s[12:13], s[10:11]
+; GCN-NEXT:    v_mov_b32_e32 v8, s9
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_addc_u32 s13, s13, s8
+; GCN-NEXT:    s_xor_b64 s[12:13], s[12:13], s[8:9]
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v10, s12
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v11, s13
 ; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v8, v2, vcc
@@ -6624,42 +6671,42 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v9, v3
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, v6, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; GCN-NEXT:    s_ashr_i32 s14, s9, 31
+; GCN-NEXT:    s_ashr_i32 s14, s11, 31
 ; GCN-NEXT:    v_addc_u32_e64 v5, vcc, v5, v8, s[0:1]
-; GCN-NEXT:    s_add_u32 s0, s8, s14
+; GCN-NEXT:    s_add_u32 s0, s10, s14
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GCN-NEXT:    s_mov_b32 s15, s14
-; GCN-NEXT:    s_addc_u32 s1, s9, s14
-; GCN-NEXT:    s_xor_b64 s[8:9], s[0:1], s[14:15]
+; GCN-NEXT:    s_addc_u32 s1, s11, s14
+; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[14:15]
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; GCN-NEXT:    v_mul_lo_u32 v5, s8, v3
-; GCN-NEXT:    v_mul_hi_u32 v7, s8, v2
-; GCN-NEXT:    v_mul_hi_u32 v9, s8, v3
-; GCN-NEXT:    v_mul_hi_u32 v10, s9, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s9, v3
+; GCN-NEXT:    v_mul_lo_u32 v5, s10, v3
+; GCN-NEXT:    v_mul_hi_u32 v7, s10, v2
+; GCN-NEXT:    v_mul_hi_u32 v9, s10, v3
+; GCN-NEXT:    v_mul_hi_u32 v10, s11, v3
+; GCN-NEXT:    v_mul_lo_u32 v3, s11, v3
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
-; GCN-NEXT:    v_mul_lo_u32 v9, s9, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s9, v2
+; GCN-NEXT:    v_mul_lo_u32 v9, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
 ; GCN-NEXT:    v_mov_b32_e32 v8, s3
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v2, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v10, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v2, v3
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v6, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v6, s12, v4
-; GCN-NEXT:    v_mul_hi_u32 v7, s12, v5
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, s13, v5
-; GCN-NEXT:    v_subb_u32_e32 v3, vcc, v1, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v7, v6
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v4, vcc
+; GCN-NEXT:    v_mul_lo_u32 v4, s12, v3
+; GCN-NEXT:    v_mul_hi_u32 v5, s12, v2
+; GCN-NEXT:    v_mul_lo_u32 v6, s13, v2
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v8, vcc
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, s12, v2
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
+; GCN-NEXT:    v_sub_i32_e32 v6, vcc, s11, v4
 ; GCN-NEXT:    v_mov_b32_e32 v7, s13
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, s12, v5
-; GCN-NEXT:    v_sub_i32_e32 v6, vcc, s9, v0
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s8, v1
+; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s10, v5
 ; GCN-NEXT:    v_subb_u32_e64 v6, s[0:1], v6, v7, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v7, s[0:1], s12, v1
+; GCN-NEXT:    v_subrev_i32_e64 v7, s[0:1], s12, v5
 ; GCN-NEXT:    v_subbrev_u32_e64 v6, s[0:1], 0, v6, s[0:1]
 ; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v6
 ; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[0:1]
@@ -6667,30 +6714,30 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s13, v6
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, v8, v7, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v5
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v9, s[0:1], 1, v5
-; GCN-NEXT:    v_addc_u32_e64 v10, s[0:1], 0, v4, s[0:1]
+; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v2
+; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v3, s[0:1]
+; GCN-NEXT:    v_add_i32_e64 v9, s[0:1], 1, v2
+; GCN-NEXT:    v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1]
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
 ; GCN-NEXT:    v_cndmask_b32_e64 v6, v10, v8, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v8, s9
-; GCN-NEXT:    v_subb_u32_e32 v0, vcc, v8, v0, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v0
+; GCN-NEXT:    v_mov_b32_e32 v8, s11
+; GCN-NEXT:    v_subb_u32_e32 v4, vcc, v8, v4, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v8, v1, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v9, v7, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc
-; GCN-NEXT:    s_xor_b64 s[0:1], s[14:15], s[10:11]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v6, vcc
-; GCN-NEXT:    v_xor_b32_e32 v1, s0, v1
-; GCN-NEXT:    v_xor_b32_e32 v4, s1, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v1
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v4, v5, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v5
+; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v4
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v8, v5, vcc
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
+; GCN-NEXT:    v_cndmask_b32_e64 v4, v9, v7, s[0:1]
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    s_xor_b64 s[0:1], s[14:15], s[8:9]
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
+; GCN-NEXT:    v_xor_b32_e32 v2, s0, v2
+; GCN-NEXT:    v_xor_b32_e32 v3, s1, v3
+; GCN-NEXT:    v_mov_b32_e32 v4, s1
+; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s0, v2
+; GCN-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y
@@ -7010,8 +7057,13 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(i64 addrspace(1)* %out, i64 %
 
 define amdgpu_kernel void @srem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x) {
 ; CHECK-LABEL: @srem_v2i64_pow2k_denom(
-; CHECK-NEXT:    [[R:%.*]] = srem <2 x i64> [[X:%.*]], <i64 4096, i64 4096>
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = srem i64 [[TMP1]], 4096
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x i64> undef, i64 [[TMP2]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = srem i64 [[TMP4]], 4096
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <2 x i64> [[TMP3]], i64 [[TMP5]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP6]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v2i64_pow2k_denom:
@@ -7022,20 +7074,20 @@ define amdgpu_kernel void @srem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s9, s3, 31
-; GCN-NEXT:    s_lshr_b32 s9, s9, 20
-; GCN-NEXT:    s_add_u32 s9, s2, s9
-; GCN-NEXT:    s_addc_u32 s10, s3, 0
-; GCN-NEXT:    s_and_b32 s9, s9, s8
-; GCN-NEXT:    s_sub_u32 s2, s2, s9
-; GCN-NEXT:    s_subb_u32 s3, s3, s10
 ; GCN-NEXT:    s_ashr_i32 s9, s1, 31
 ; GCN-NEXT:    s_lshr_b32 s9, s9, 20
 ; GCN-NEXT:    s_add_u32 s9, s0, s9
 ; GCN-NEXT:    s_addc_u32 s10, s1, 0
-; GCN-NEXT:    s_and_b32 s8, s9, s8
-; GCN-NEXT:    s_sub_u32 s0, s0, s8
+; GCN-NEXT:    s_and_b32 s9, s9, s8
+; GCN-NEXT:    s_sub_u32 s0, s0, s9
 ; GCN-NEXT:    s_subb_u32 s1, s1, s10
+; GCN-NEXT:    s_ashr_i32 s9, s3, 31
+; GCN-NEXT:    s_lshr_b32 s9, s9, 20
+; GCN-NEXT:    s_add_u32 s9, s2, s9
+; GCN-NEXT:    s_addc_u32 s10, s3, 0
+; GCN-NEXT:    s_and_b32 s8, s9, s8
+; GCN-NEXT:    s_sub_u32 s2, s2, s8
+; GCN-NEXT:    s_subb_u32 s3, s3, s10
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -7050,8 +7102,15 @@ define amdgpu_kernel void @srem_v2i64_pow2k_denom(<2 x i64> addrspace(1)* %out,
 define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
 ; CHECK-LABEL: @srem_v2i64_pow2_shl_denom(
 ; CHECK-NEXT:    [[SHL_Y:%.*]] = shl <2 x i64> <i64 4096, i64 4096>, [[Y:%.*]]
-; CHECK-NEXT:    [[R:%.*]] = srem <2 x i64> [[X:%.*]], [[SHL_Y]]
-; CHECK-NEXT:    store <2 x i64> [[R]], <2 x i64> addrspace(1)* [[OUT:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i64> [[X:%.*]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = srem i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x i64> undef, i64 [[TMP3]], i64 0
+; CHECK-NEXT:    [[TMP5:%.*]] = extractelement <2 x i64> [[X]], i64 1
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x i64> [[SHL_Y]], i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = srem i64 [[TMP5]], [[TMP6]]
+; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x i64> [[TMP4]], i64 [[TMP7]], i64 1
+; CHECK-NEXT:    store <2 x i64> [[TMP8]], <2 x i64> addrspace(1)* [[OUT:%.*]]
 ; CHECK-NEXT:    ret void
 ;
 ; GCN-LABEL: srem_v2i64_pow2_shl_denom:
@@ -7062,8 +7121,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    s_mov_b32 s18, 0x4f800000
 ; GCN-NEXT:    s_mov_b32 s19, 0x5f7ffffc
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshl_b64 s[14:15], s[2:3], s4
-; GCN-NEXT:    s_lshl_b64 s[2:3], s[2:3], s6
+; GCN-NEXT:    s_lshl_b64 s[14:15], s[2:3], s6
+; GCN-NEXT:    s_lshl_b64 s[2:3], s[2:3], s4
 ; GCN-NEXT:    s_ashr_i32 s4, s3, 31
 ; GCN-NEXT:    s_add_u32 s2, s2, s4
 ; GCN-NEXT:    s_mov_b32 s5, s4
@@ -7086,8 +7145,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s12, s11, 31
-; GCN-NEXT:    s_add_u32 s0, s10, s12
+; GCN-NEXT:    s_ashr_i32 s12, s9, 31
+; GCN-NEXT:    s_add_u32 s0, s8, s12
 ; GCN-NEXT:    v_mul_hi_u32 v3, s6, v0
 ; GCN-NEXT:    v_mul_lo_u32 v2, s6, v1
 ; GCN-NEXT:    v_mul_lo_u32 v4, s7, v0
@@ -7104,8 +7163,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v6, v1, v5
 ; GCN-NEXT:    v_mul_hi_u32 v5, v1, v5
-; GCN-NEXT:    s_addc_u32 s1, s11, s12
-; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[12:13]
+; GCN-NEXT:    s_addc_u32 s1, s9, s12
+; GCN-NEXT:    s_xor_b64 s[8:9], s[0:1], s[12:13]
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v4, 0
@@ -7140,15 +7199,15 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v1, v5, s[2:3]
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v5, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v7, s11, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
+; GCN-NEXT:    v_mul_lo_u32 v2, s8, v1
+; GCN-NEXT:    v_mul_hi_u32 v3, s8, v0
+; GCN-NEXT:    v_mul_hi_u32 v5, s8, v1
+; GCN-NEXT:    v_mul_hi_u32 v7, s9, v1
+; GCN-NEXT:    v_mul_lo_u32 v1, s9, v1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; GCN-NEXT:    v_mul_lo_u32 v5, s11, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    v_mul_lo_u32 v5, s9, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s9, v0
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
@@ -7161,9 +7220,9 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_mul_lo_u32 v0, s16, v0
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s11, v1
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s9, v1
 ; GCN-NEXT:    v_mov_b32_e32 v3, s17
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s10, v0
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
 ; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
 ; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s16, v0
 ; GCN-NEXT:    v_subb_u32_e64 v3, s[2:3], v2, v3, s[0:1]
@@ -7178,14 +7237,14 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    s_ashr_i32 s2, s15, 31
 ; GCN-NEXT:    v_subbrev_u32_e64 v3, s[0:1], 0, v3, s[0:1]
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v7
-; GCN-NEXT:    s_add_u32 s10, s14, s2
+; GCN-NEXT:    s_add_u32 s8, s14, s2
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-NEXT:    v_mov_b32_e32 v3, s9
 ; GCN-NEXT:    s_mov_b32 s3, s2
-; GCN-NEXT:    s_addc_u32 s11, s15, s2
-; GCN-NEXT:    s_xor_b64 s[10:11], s[10:11], s[2:3]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v7, s10
-; GCN-NEXT:    v_cvt_f32_u32_e32 v9, s11
+; GCN-NEXT:    s_addc_u32 s9, s15, s2
+; GCN-NEXT:    s_xor_b64 s[8:9], s[8:9], s[2:3]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v7, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v9, s9
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v3, v1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s17, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
@@ -7204,13 +7263,13 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_mac_f32_e32 v3, s21, v5
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v5, v5
-; GCN-NEXT:    s_sub_u32 s2, 0, s10
+; GCN-NEXT:    s_sub_u32 s2, 0, s8
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GCN-NEXT:    v_mul_hi_u32 v2, s2, v3
 ; GCN-NEXT:    v_mul_lo_u32 v7, s2, v5
-; GCN-NEXT:    s_subb_u32 s3, 0, s11
+; GCN-NEXT:    s_subb_u32 s3, 0, s9
 ; GCN-NEXT:    v_mul_lo_u32 v8, s3, v3
-; GCN-NEXT:    s_ashr_i32 s14, s9, 31
+; GCN-NEXT:    s_ashr_i32 s14, s11, 31
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v7
 ; GCN-NEXT:    v_mul_lo_u32 v7, s2, v3
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
@@ -7255,68 +7314,68 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(<2 x i64> addrspace(1)* %ou
 ; GCN-NEXT:    v_addc_u32_e32 v8, vcc, v6, v8, vcc
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
 ; GCN-NEXT:    v_addc_u32_e64 v5, vcc, v5, v8, s[0:1]
-; GCN-NEXT:    s_add_u32 s0, s8, s14
+; GCN-NEXT:    s_add_u32 s0, s10, s14
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    s_addc_u32 s1, s9, s14
-; GCN-NEXT:    s_xor_b64 s[8:9], s[0:1], s[14:15]
+; GCN-NEXT:    s_addc_u32 s1, s11, s14
+; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[14:15]
 ; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
-; GCN-NEXT:    v_mul_lo_u32 v5, s8, v3
-; GCN-NEXT:    v_mul_hi_u32 v7, s8, v2
-; GCN-NEXT:    v_mul_hi_u32 v9, s8, v3
-; GCN-NEXT:    v_mul_hi_u32 v10, s9, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s9, v3
+; GCN-NEXT:    v_mul_lo_u32 v5, s10, v3
+; GCN-NEXT:    v_mul_hi_u32 v7, s10, v2
+; GCN-NEXT:    v_mul_hi_u32 v9, s10, v3
+; GCN-NEXT:    v_mul_hi_u32 v10, s11, v3
+; GCN-NEXT:    v_mul_lo_u32 v3, s11, v3
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
-; GCN-NEXT:    v_mul_lo_u32 v9, s9, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s9, v2
+; GCN-NEXT:    v_mul_lo_u32 v9, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
 ; GCN-NEXT:    v_mov_b32_e32 v8, s12
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
 ; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v7, v2, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v10, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v2, v3
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v6, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s10, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, s10, v5
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s12, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, s11, v5
-; GCN-NEXT:    v_subb_u32_e32 v3, vcc, v1, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v6, v4
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, s10, v5
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s9, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s11
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s8, v1
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v6, v4, vcc
+; GCN-NEXT:    v_mul_lo_u32 v3, s8, v3
+; GCN-NEXT:    v_mul_hi_u32 v4, s8, v2
+; GCN-NEXT:    v_mul_lo_u32 v5, s9, v2
+; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
+; GCN-NEXT:    v_mul_lo_u32 v2, s8, v2
+; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v8, vcc
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s11, v3
+; GCN-NEXT:    v_mov_b32_e32 v5, s9
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s10, v2
 ; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v6, s[0:1], s10, v1
+; GCN-NEXT:    v_subrev_i32_e64 v6, s[0:1], s8, v2
 ; GCN-NEXT:    v_subb_u32_e64 v5, s[2:3], v4, v5, s[0:1]
 ; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s11, v4
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s9, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s10, v6
+; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s8, v6
 ; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, v4
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s9, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v7, v7, v8, s[0:1]
-; GCN-NEXT:    v_subrev_i32_e64 v8, s[0:1], s10, v6
+; GCN-NEXT:    v_subrev_i32_e64 v8, s[0:1], s8, v6
 ; GCN-NEXT:    v_subbrev_u32_e64 v5, s[0:1], 0, v5, s[0:1]
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v7
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v5, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v5, s9
-; GCN-NEXT:    v_subb_u32_e32 v0, vcc, v5, v0, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s11, v0
+; GCN-NEXT:    v_mov_b32_e32 v5, s11
+; GCN-NEXT:    v_subb_u32_e32 v3, vcc, v5, v3, vcc
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s10, v1
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v2
 ; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s11, v0
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v3
 ; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v8, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
-; GCN-NEXT:    v_xor_b32_e32 v1, s14, v1
-; GCN-NEXT:    v_xor_b32_e32 v4, s14, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s14
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s14, v1
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v4, v5, vcc
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    v_xor_b32_e32 v2, s14, v2
+; GCN-NEXT:    v_xor_b32_e32 v3, s14, v3
+; GCN-NEXT:    v_mov_b32_e32 v4, s14
+; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s14, v2
+; GCN-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
   %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index cd6cecaa4ad7..29f73d6b37b8 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
 
 define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_sdiv:
@@ -139,6 +140,111 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v2, v5, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
+; GCN-IR-NEXT:    s_ashr_i32 s8, s13, 31
+; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_mov_b32 s9, s8
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s10, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s11, s1, s2
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[8:9], s[12:13]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
+; GCN-IR-NEXT:    s_sub_u32 s6, s0, s8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s11
+; GCN-IR-NEXT:    s_subb_u32 s7, s1, s8
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s6
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT:    s_add_i32 s15, s15, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s16, s7
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s16
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s15
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v1, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[10:11], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[10:11], v0
+; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s11
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s10, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s6, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s7, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB0_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[8:9], s[2:3]
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = sdiv i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -272,6 +378,105 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v2, v8, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v5, 31, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v4, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v5, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v5, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, v0, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v13, vcc, v1, v4, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v5
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v3, v5, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[12:13]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v8, v12
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v9, v13
+; GCN-IR-NEXT:    s_or_b64 s[6:7], vcc, s[4:5]
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v13
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v9, v8, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v2, v3
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[8:9]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[4:5], 63, v[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v13, 0, s[6:7]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[6:7], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v12, 0, s[6:7]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v8
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v10, vcc, 63, v8
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[12:13], v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[16:17], v[12:13], v2
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, 0
+; GCN-IR-NEXT:  BB1_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[16:17], v[16:17], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v8, 31, v11
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v16, v16, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v11, v15, v11
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v14, v10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v12, v16
+; GCN-IR-NEXT:    v_subb_u32_e64 v8, s[4:5], v13, v17, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 1, v14
+; GCN-IR-NEXT:    v_and_b32_e32 v15, v14, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v14, v14, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v16, vcc, v16, v14
+; GCN-IR-NEXT:    v_subb_u32_e32 v17, vcc, v17, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, v8
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB1_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB1_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[10:11], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v8, v0
+; GCN-IR-NEXT:  BB1_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v7, v6
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v5, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, %y
   ret i64 %result
 }
@@ -289,23 +494,53 @@ define amdgpu_kernel void @s_test_sdiv24_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 40
 ; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 40
 ; GCN-NEXT:    s_xor_b32 s5, s4, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
 ; GCN-NEXT:    s_ashr_i32 s4, s5, 30
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
 ; GCN-NEXT:    s_or_b32 s4, s4, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v2
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv24_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 40
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 40
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s4, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 40
   %2 = ashr i64 %y, 40
   %result = sdiv i64 %1, %2
@@ -319,31 +554,36 @@ define i64 @v_test_sdiv24_64(i64 %x, i64 %y) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
 ; GCN-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, v1
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x4f800000, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_hi_u32 v3, v2, v1
-; GCN-NEXT:    v_mul_lo_u32 v4, v2, v1
-; GCN-NEXT:    v_sub_i32_e32 v5, vcc, 0, v4
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
-; GCN-NEXT:    v_mul_hi_u32 v3, v3, v2
-; GCN-NEXT:    v_add_i32_e64 v4, s[4:5], v2, v3
-; GCN-NEXT:    v_sub_i32_e64 v2, s[4:5], v2, v3
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, v2, v1
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, 1, v2
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, -1, v2
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
-; GCN-NEXT:    v_sub_i32_e64 v0, s[4:5], v0, v3
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v1
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv24_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, v1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %1 = lshr i64 %x, 40
   %2 = lshr i64 %y, 40
   %result = sdiv i64 %1, %2
@@ -354,47 +594,56 @@ define amdgpu_kernel void @s_test_sdiv32_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_sdiv32_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_ashr_i32 s3, s2, 31
-; GCN-NEXT:    s_ashr_i32 s4, s7, 31
-; GCN-NEXT:    s_add_i32 s2, s2, s3
-; GCN-NEXT:    s_add_i32 s0, s7, s4
-; GCN-NEXT:    s_xor_b32 s5, s2, s3
-; GCN-NEXT:    s_xor_b32 s2, s0, s4
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s5
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s2, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s2, v1
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s5, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
-; GCN-NEXT:    s_xor_b32 s0, s4, s3
-; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_xor_b32 s4, s7, s8
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s7
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s8
+; GCN-NEXT:    s_ashr_i32 s4, s4, 30
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv32_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_xor_b32 s4, s7, s8
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s7
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s8
+; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 32
   %2 = ashr i64 %y, 32
   %result = sdiv i64 %1, %2
@@ -406,49 +655,62 @@ define amdgpu_kernel void @s_test_sdiv31_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_sdiv31_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], 33
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
-; GCN-NEXT:    s_ashr_i32 s3, s2, 31
-; GCN-NEXT:    s_ashr_i32 s4, s0, 31
-; GCN-NEXT:    s_add_i32 s1, s2, s3
-; GCN-NEXT:    s_add_i32 s0, s0, s4
-; GCN-NEXT:    s_xor_b32 s5, s1, s3
-; GCN-NEXT:    s_xor_b32 s2, s0, s4
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s5
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s2, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s2, v1
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s5, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
-; GCN-NEXT:    s_xor_b32 s0, s4, s3
-; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
+; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 33
+; GCN-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-NEXT:    s_ashr_i32 s4, s5, 30
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 31
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv31_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 33
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s4, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 31
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 33
   %2 = ashr i64 %y, 33
   %result = sdiv i64 %1, %2
@@ -469,23 +731,53 @@ define amdgpu_kernel void @s_test_sdiv23_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 41
 ; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 41
 ; GCN-NEXT:    s_xor_b32 s5, s4, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
 ; GCN-NEXT:    s_ashr_i32 s4, s5, 30
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
 ; GCN-NEXT:    s_or_b32 s4, s4, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mov_b32_e32 v3, s4
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v2
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv23_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 41
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 41
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s4, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 23
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 41
   %2 = ashr i64 %y, 41
   %result = sdiv i64 %1, %2
@@ -497,49 +789,62 @@ define amdgpu_kernel void @s_test_sdiv25_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_sdiv25_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], 39
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
-; GCN-NEXT:    s_ashr_i32 s3, s2, 31
-; GCN-NEXT:    s_ashr_i32 s4, s0, 31
-; GCN-NEXT:    s_add_i32 s1, s2, s3
-; GCN-NEXT:    s_add_i32 s0, s0, s4
-; GCN-NEXT:    s_xor_b32 s5, s1, s3
-; GCN-NEXT:    s_xor_b32 s2, s0, s4
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s5
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s2, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s2, v1
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s5, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
-; GCN-NEXT:    s_xor_b32 s0, s4, s3
-; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
+; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 39
+; GCN-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-NEXT:    s_ashr_i32 s4, s5, 30
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 25
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv25_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 39
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s4, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 39
   %2 = ashr i64 %y, 39
   %result = sdiv i64 %1, %2
@@ -556,10 +861,10 @@ define amdgpu_kernel void @s_test_sdiv24_v2i64(<2 x i64> addrspace(1)* %out, <2
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i64 s[0:1], s[8:9], 40
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[10:11], 40
-; GCN-NEXT:    s_ashr_i64 s[8:9], s[12:13], 40
-; GCN-NEXT:    s_ashr_i64 s[10:11], s[14:15], 40
+; GCN-NEXT:    s_ashr_i64 s[0:1], s[10:11], 40
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[8:9], 40
+; GCN-NEXT:    s_ashr_i64 s[8:9], s[14:15], 40
+; GCN-NEXT:    s_ashr_i64 s[10:11], s[12:13], 40
 ; GCN-NEXT:    s_xor_b32 s1, s2, s10
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s2
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s10
@@ -588,12 +893,59 @@ define amdgpu_kernel void @s_test_sdiv24_v2i64(<2 x i64> addrspace(1)* %out, <2
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v7, vcc
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
-; GCN-NEXT:    v_bfe_i32 v2, v0, 0, 24
-; GCN-NEXT:    v_bfe_i32 v0, v1, 0, 24
-; GCN-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-NEXT:    v_bfe_i32 v2, v1, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv24_v2i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dwordx4 s[12:15], s[0:1], 0x11
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[10:11], 40
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[8:9], 40
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[14:15], 40
+; GCN-IR-NEXT:    s_ashr_i64 s[10:11], s[12:13], 40
+; GCN-IR-NEXT:    s_xor_b32 s1, s2, s10
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s2
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s10
+; GCN-IR-NEXT:    s_xor_b32 s2, s0, s8
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v2, s0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v3, s8
+; GCN-IR-NEXT:    s_ashr_i32 s0, s1, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v4, v1
+; GCN-IR-NEXT:    s_ashr_i32 s1, s2, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v5, v3
+; GCN-IR-NEXT:    s_or_b32 s0, s0, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v4, v0, v4
+; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v5, v2, v5
+; GCN-IR-NEXT:    v_trunc_f32_e32 v4, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, s0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, s1
+; GCN-IR-NEXT:    v_mad_f32 v0, -v4, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v4, v4
+; GCN-IR-NEXT:    v_mad_f32 v2, -v5, v3, v2
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v6, vcc
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v3|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_bfe_i32 v2, v1, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GCN-IR-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr <2 x i64> %x, <i64 40, i64 40>
   %2 = ashr <2 x i64> %y, <i64 40, i64 40>
   %result = sdiv <2 x i64> %1, %2
@@ -636,6 +988,119 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv24_48:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xe
+; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xd
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
+; GCN-IR-NEXT:    s_sext_i32_i16 s9, s6
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
+; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
+; GCN-IR-NEXT:    s_ashr_i32 s6, s9, 31
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[8:9], 24
+; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_mov_b32 s7, s6
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s10, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s11, s1, s2
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[8:9]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
+; GCN-IR-NEXT:    s_sub_u32 s8, s0, s6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s11
+; GCN-IR-NEXT:    s_subb_u32 s9, s1, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s8
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    s_add_i32 s15, s15, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s16, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s16
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s15
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v1, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB9_7
+; GCN-IR-NEXT:  BB9_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[10:11], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB9_6
+; GCN-IR-NEXT:  BB9_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[10:11], v0
+; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB9_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s11
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s10, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s8, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s9, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
+; GCN-IR-NEXT:  BB9_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB9_7: ; %udiv-end
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[2:3]
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
+; GCN-IR-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i48 %x, 24
   %2 = ashr i48 %y, 24
   %result = sdiv i48 %1, %2
@@ -769,6 +1234,93 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v3, v2, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
+; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s8, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s9, s1, s2
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s8
+; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffc5, v0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[6:7]
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB10_7
+; GCN-IR-NEXT:  BB10_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], 24, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB10_6
+; GCN-IR-NEXT:  BB10_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v0
+; GCN-IR-NEXT:    s_add_u32 s7, s8, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s10, s9, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB10_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s10
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s7, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s8, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s9, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
+; GCN-IR-NEXT:  BB10_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB10_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = sdiv i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -892,6 +1444,91 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v3, v2, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    s_movk_i32 s4, 0xffc5
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 32, v4
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v4, v5, v4, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, s4, v4
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB11_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 63, v6
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[4:5], v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], 24, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB11_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[14:15], 24, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:  BB11_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[14:15], v[14:15], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, -1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, -1, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v14, v14, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v9, v13, v9
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v12, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v10, v14
+; GCN-IR-NEXT:    v_subb_u32_e64 v6, s[4:5], v11, v15, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, v14, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v15, vcc, v15, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB11_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB11_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[8:9], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v0
+; GCN-IR-NEXT:  BB11_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 24, %x
   ret i64 %result
 }
@@ -1015,6 +1652,96 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v3, v2, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    s_movk_i32 s4, 0xffd0
+; GCN-IR-NEXT:    s_mov_b32 s10, 0x8000
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, s10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v5, vcc, 32, v5
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, s4, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB12_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 63, v6
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[4:5], v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], s[10:11], v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB12_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[14:15], s[4:5], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:  BB12_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[14:15], v[14:15], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v8, 31, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, -1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, -1, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v14, v14, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v13, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v12, v6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v10, v14
+; GCN-IR-NEXT:    v_subb_u32_e64 v8, s[4:5], v11, v15, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, v14, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v15, vcc, v15, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v8
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB12_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB12_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[6:7], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v0
+; GCN-IR-NEXT:  BB12_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 32768, %x
   ret i64 %result
 }
@@ -1029,6 +1756,89 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_ashr_i64 v[0:1], v[0:1], 15
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v8
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v9
+; GCN-IR-NEXT:    v_add_i32_e64 v0, s[4:5], 32, v0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v9
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v1, v0, s[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v9, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[4:5], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v8, 0, s[4:5]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB13_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[0:1], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[8:9], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB13_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[8:9], v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:  BB13_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v9, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v8, v6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v10
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v10, vcc, v10, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v11, vcc, v11, v12, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB13_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB13_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[6:7], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v5, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v4, v0
+; GCN-IR-NEXT:  BB13_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, 32768
   ret i64 %result
 }
@@ -1044,22 +1854,49 @@ define amdgpu_kernel void @s_test_sdiv24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
+; GCN-NEXT:    s_ashr_i32 s1, s0, 30
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
-; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_or_b32 s0, s1, 1
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
-; GCN-NEXT:    s_or_b32 s0, s0, 1
 ; GCN-NEXT:    v_mul_f32_e32 v1, s2, v1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mad_f32 v3, -v1, v0, s2
+; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s2, 0x41c00000
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 30
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s0
+; GCN-IR-NEXT:    s_or_b32 s0, s1, 1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, s2, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v3, -v1, v0, s2
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = ashr i64 %x, 40
   %result = sdiv i64 24, %x.shr
   store i64 %result, i64 addrspace(1)* %out
@@ -1077,21 +1914,47 @@ define amdgpu_kernel void @s_test_sdiv24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
+; GCN-NEXT:    s_ashr_i32 s1, s0, 30
 ; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
-; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_or_b32 s0, s1, 1
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
-; GCN-NEXT:    s_or_b32 s0, s0, 1
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s0
 ; GCN-NEXT:    v_mad_f32 v0, -v1, s2, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s2
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_sdiv24_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s2, 0x46b6fe00
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 30
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s0
+; GCN-IR-NEXT:    s_or_b32 s0, s1, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-IR-NEXT:    v_mad_f32 v0, -v1, s2, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s2
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = ashr i64 %x, 40
   %result = sdiv i64 %x.shr, 23423
   store i64 %result, i64 addrspace(1)* %out
@@ -1104,20 +1967,40 @@ define i64 @v_test_sdiv24_k_num_i64(i64 %x) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
 ; GCN-NEXT:    s_mov_b32 s4, 0x41c00000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, v0
-; GCN-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
-; GCN-NEXT:    v_or_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    v_mul_f32_e32 v2, s4, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v3, -v2, v1, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v1|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v2
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT:    v_mad_f32 v3, -v2, v0, s4
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x41c00000
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, s4, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v3, -v2, v0, s4
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = ashr i64 %x, 40
   %result = sdiv i64 24, %x.shr
   ret i64 %result
@@ -1129,20 +2012,40 @@ define i64 @v_test_sdiv24_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
 ; GCN-NEXT:    s_mov_b32 s4, 0x47000000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, v0
-; GCN-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
-; GCN-NEXT:    v_or_b32_e32 v0, 1, v0
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
 ; GCN-NEXT:    v_mul_f32_e32 v2, s4, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v3, -v2, v1, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v1|
-; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v2
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT:    v_mad_f32 v3, -v2, v0, s4
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv24_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, s4, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v3, -v2, v0, s4
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = ashr i64 %x, 40
   %result = sdiv i64 32768, %x.shr
   ret i64 %result
@@ -1158,6 +2061,25 @@ define i64 @v_test_sdiv24_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_ashr_i64 v[0:1], v[0:1], 15
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_sdiv24_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, 0x38000000, v0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, s4, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s4
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = ashr i64 %x, 40
   %result = sdiv i64 %x.shr, 32768
   ret i64 %result

diff  --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 9113f6c2e638..73da5d42e15b 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
 
 define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_srem:
@@ -122,6 +123,106 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s2
+; GCN-IR-NEXT:    s_add_i32 s11, s0, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s3
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    s_add_i32 s8, s10, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[6:7], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[6:7], v0
+; GCN-IR-NEXT:    s_add_u32 s8, s2, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s9, s3, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s9
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s8, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s2, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s3, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB0_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s11, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s10, -1
+; GCN-IR-NEXT:    s_mov_b32 s8, s4
+; GCN-IR-NEXT:    s_mov_b32 s9, s5
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, s7
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -253,6 +354,110 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v7
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v7, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v6
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v4, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v3, vcc, v3, v6, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v2
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v7, v3
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v8, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v9, v1
+; GCN-IR-NEXT:    s_or_b64 s[6:7], vcc, s[4:5]
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 32, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 32, v8
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v6, v7, v6, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v7, v9, v8, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v6, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[8:9]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[4:5], 63, v[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[6:7]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[6:7], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[6:7]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v8
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v10, vcc, 63, v8
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[6:7], v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[0:1], v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[16:17], v[0:1], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, 0
+; GCN-IR-NEXT:  BB1_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[16:17], v[16:17], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v8, 31, v11
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, -1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, -1, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v16, v16, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v11, v15, v11
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v14, v10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v12, v16
+; GCN-IR-NEXT:    v_subb_u32_e64 v8, s[4:5], v13, v17, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 1, v14
+; GCN-IR-NEXT:    v_and_b32_e32 v15, v14, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v14, v14, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v16, vcc, v16, v14
+; GCN-IR-NEXT:    v_subb_u32_e32 v17, vcc, v17, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, v8
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB1_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB1_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[10:11], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v9, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v8, v6
+; GCN-IR-NEXT:  BB1_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mul_lo_u32 v7, v2, v7
+; GCN-IR-NEXT:    v_mul_hi_u32 v8, v2, v6
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v6
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v8, v7
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v4
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v5
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, %y
   ret i64 %result
 }
@@ -270,25 +475,57 @@ define amdgpu_kernel void @s_test_srem23_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 41
 ; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 41
 ; GCN-NEXT:    s_xor_b32 s5, s4, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
 ; GCN-NEXT:    s_ashr_i32 s5, s5, 30
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
 ; GCN-NEXT:    s_or_b32 s5, s5, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v2
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s6
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 23
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem23_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 41
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 41
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s5, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s5, s5, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s5
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 23
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 41
   %2 = ashr i64 %y, 41
   %result = srem i64 %1, %2
@@ -309,25 +546,57 @@ define amdgpu_kernel void @s_test_srem24_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 40
 ; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 40
 ; GCN-NEXT:    s_xor_b32 s5, s4, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s6
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
 ; GCN-NEXT:    s_ashr_i32 s5, s5, 30
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
 ; GCN-NEXT:    s_or_b32 s5, s5, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
 ; GCN-NEXT:    v_trunc_f32_e32 v2, v2
 ; GCN-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v2
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s6
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem24_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 40
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 40
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s5, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s5, s5, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s5
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 40
   %2 = ashr i64 %y, 40
   %result = srem i64 %1, %2
@@ -350,15 +619,39 @@ define i64 @v_test_srem24_64(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_mul_f32_e32 v5, v3, v5
 ; GCN-NEXT:    v_trunc_f32_e32 v5, v5
 ; GCN-NEXT:    v_mad_f32 v3, -v5, v4, v3
+; GCN-NEXT:    v_cvt_i32_f32_e32 v5, v5
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v4|
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v5
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
 ; GCN-NEXT:    v_mul_lo_u32 v1, v2, v1
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem24_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
+; GCN-IR-NEXT:    v_ashr_i64 v[1:2], v[2:3], 40
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v0, v1
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v3, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v4, v1
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 30, v2
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v5, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v2, 1, v2
+; GCN-IR-NEXT:    v_mul_f32_e32 v5, v3, v5
+; GCN-IR-NEXT:    v_trunc_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_mad_f32 v3, -v5, v4, v3
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v4|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v2, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %1 = ashr i64 %x, 40
   %2 = ashr i64 %y, 40
   %result = srem i64 %1, %2
@@ -369,48 +662,66 @@ define amdgpu_kernel void @s_test_srem25_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_srem25_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], 39
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
-; GCN-NEXT:    s_ashr_i32 s1, s2, 31
-; GCN-NEXT:    s_ashr_i32 s4, s0, 31
-; GCN-NEXT:    s_add_i32 s2, s2, s1
-; GCN-NEXT:    s_add_i32 s0, s0, s4
-; GCN-NEXT:    s_xor_b32 s5, s2, s1
-; GCN-NEXT:    s_xor_b32 s2, s0, s4
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s5, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s2, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s5, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v1
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
-; GCN-NEXT:    v_xor_b32_e32 v0, s4, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
+; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 39
+; GCN-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-NEXT:    s_ashr_i32 s5, s5, 30
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    s_or_b32 s5, s5, 1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mov_b32_e32 v3, s5
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s6
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 25
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem25_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 39
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s5, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s5, s5, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s5
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 39
   %2 = ashr i64 %y, 39
   %result = srem i64 %1, %2
@@ -422,48 +733,66 @@ define amdgpu_kernel void @s_test_srem31_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_srem31_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], 33
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
-; GCN-NEXT:    s_ashr_i32 s1, s2, 31
-; GCN-NEXT:    s_ashr_i32 s4, s0, 31
-; GCN-NEXT:    s_add_i32 s2, s2, s1
-; GCN-NEXT:    s_add_i32 s0, s0, s4
-; GCN-NEXT:    s_xor_b32 s5, s2, s1
-; GCN-NEXT:    s_xor_b32 s2, s0, s4
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s5, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s2, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s5, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v1
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
-; GCN-NEXT:    v_xor_b32_e32 v0, s4, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
+; GCN-NEXT:    s_ashr_i64 s[6:7], s[8:9], 33
+; GCN-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-NEXT:    s_ashr_i32 s5, s5, 30
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    s_or_b32 s5, s5, 1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mov_b32_e32 v3, s5
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s6
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 31
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem31_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s9, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[8:9], 33
+; GCN-IR-NEXT:    s_xor_b32 s5, s4, s6
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s6
+; GCN-IR-NEXT:    s_ashr_i32 s5, s5, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s5, s5, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s5
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 31
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 33
   %2 = ashr i64 %y, 33
   %result = srem i64 %1, %2
@@ -476,46 +805,60 @@ define amdgpu_kernel void @s_test_srem32_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_srem32_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_ashr_i32 s0, s2, 31
-; GCN-NEXT:    s_ashr_i32 s4, s7, 31
-; GCN-NEXT:    s_add_i32 s2, s2, s0
-; GCN-NEXT:    s_add_i32 s1, s7, s4
-; GCN-NEXT:    s_xor_b32 s5, s2, s0
-; GCN-NEXT:    s_xor_b32 s2, s1, s4
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s5, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s2, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s5, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v1
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
-; GCN-NEXT:    v_xor_b32_e32 v0, s4, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_xor_b32 s4, s7, s8
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s7
+; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s8
+; GCN-NEXT:    s_ashr_i32 s4, s4, 30
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    s_or_b32 s4, s4, 1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s8
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s7, v0
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem32_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_xor_b32 s4, s7, s8
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s7
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s8
+; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    s_or_b32 s4, s4, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, |v1|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s8
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s7, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 32
   %2 = ashr i64 %y, 32
   %result = srem i64 %1, %2
@@ -662,6 +1005,121 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v2, v3, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem33_64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[6:7], 31
+; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
+; GCN-IR-NEXT:    s_ashr_i32 s6, s9, 31
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[8:9], 31
+; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_mov_b32 s7, s6
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s8, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s9, s1, s2
+; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s8
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s9
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s10
+; GCN-IR-NEXT:    s_add_i32 s14, s7, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_i32 s12, s13, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[6:7], s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s12
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v1, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[6:7], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[6:7]
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB8_7
+; GCN-IR-NEXT:  BB8_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[8:9], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB8_6
+; GCN-IR-NEXT:  BB8_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[8:9], v0
+; GCN-IR-NEXT:    s_add_u32 s6, s10, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s7, s11, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB8_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s7
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s6, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s10, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s11, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
+; GCN-IR-NEXT:  BB8_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB8_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s10, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s10, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s11, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s10, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 31
   %2 = ashr i64 %y, 31
   %result = srem i64 %1, %2
@@ -706,6 +1164,127 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; GCN-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem24_48:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dword s7, s[0:1], 0xe
+; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
+; GCN-IR-NEXT:    s_sext_i32_i16 s7, s7
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
+; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
+; GCN-IR-NEXT:    s_ashr_i32 s10, s7, 31
+; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[6:7], 24
+; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[6:7], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s8, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s9, s1, s2
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s8
+; GCN-IR-NEXT:    s_sub_u32 s6, s6, s10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s9
+; GCN-IR-NEXT:    s_subb_u32 s7, s7, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s6
+; GCN-IR-NEXT:    s_add_i32 s14, s11, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    s_add_i32 s12, s13, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[10:11], s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s12
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v1, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[10:11], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB9_7
+; GCN-IR-NEXT:  BB9_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[8:9], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB9_6
+; GCN-IR-NEXT:  BB9_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[8:9], v0
+; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB9_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s11
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s10, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s6, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s7, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
+; GCN-IR-NEXT:  BB9_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB9_7: ; %udiv-end
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s6, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s7, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s6, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, s3
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
+; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; GCN-IR-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
+; GCN-IR-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i48 %x, 24
   %2 = ashr i48 %y, 24
   %result = srem i48 %1, %2
@@ -833,6 +1412,96 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_ashr_i32 s0, s7, 31
+; GCN-IR-NEXT:    s_mov_b32 s1, s0
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[6:7], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s2, s2, s0
+; GCN-IR-NEXT:    s_subb_u32 s3, s3, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s2
+; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffc5, v0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[6:7]
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB10_7
+; GCN-IR-NEXT:  BB10_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], 24, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB10_6
+; GCN-IR-NEXT:  BB10_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v0
+; GCN-IR-NEXT:    s_add_u32 s7, s2, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s8, s3, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB10_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s7, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s2, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s3, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
+; GCN-IR-NEXT:  BB10_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB10_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = srem i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -951,6 +1620,94 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    s_movk_i32 s4, 0xffc5
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, s4, v2
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, 24, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB11_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], 24, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB11_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[12:13], 24, v2
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:  BB11_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v11, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v10, v6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], v8, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v4, s[4:5], v9, v13, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v13, vcc, v13, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB11_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB11_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v5, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v4, v2
+; GCN-IR-NEXT:  BB11_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GCN-IR-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v4, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 24, %x
   ret i64 %result
 }
@@ -1069,6 +1826,99 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    s_movk_i32 s4, 0xffd0
+; GCN-IR-NEXT:    s_mov_b32 s10, 0x8000
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, s4, v2
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v3, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB12_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[10:11], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB12_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[12:13], s[4:5], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:  BB12_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v8, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v6, s[4:5], v9, v13, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v13, vcc, v13, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB12_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB12_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:  BB12_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GCN-IR-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v4, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
+; GCN-IR-NEXT:    v_add_i32_e64 v1, s[4:5], v2, v1
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 32768, %x
   ret i64 %result
 }
@@ -1085,6 +1935,92 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
+; GCN-IR-NEXT:    v_add_i32_e64 v4, s[4:5], 32, v4
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v5, v4, s[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 48, v4
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v1, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[4:5], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v0, 0, s[4:5]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB13_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 63, v6
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[4:5], v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB13_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[12:13], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:  BB13_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, -1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, -1, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v9, v11, v9
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v10, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, s12, v12
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v13, vcc, v13, v14, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB13_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB13_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[8:9], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:  BB13_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 15
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, 32768
   ret i64 %result
 }
@@ -1100,17 +2036,17 @@ define amdgpu_kernel void @s_test_srem24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
 ; GCN-NEXT:    s_ashr_i32 s1, s0, 30
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
 ; GCN-NEXT:    s_or_b32 s1, s1, 1
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, s2, v1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s1
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mad_f32 v3, -v1, v0, s2
+; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v1, v1
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
@@ -1118,6 +2054,35 @@ define amdgpu_kernel void @s_test_srem24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s2, 0x41c00000
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 30
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s0
+; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, s2, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v3, -v1, v0, s2
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v0|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = ashr i64 %x, 40
   %result = srem i64 24, %x.shr
   store i64 %result, i64 addrspace(1)* %out
@@ -1135,10 +2100,10 @@ define amdgpu_kernel void @s_test_srem24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
 ; GCN-NEXT:    s_ashr_i32 s1, s0, 30
-; GCN-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s0
 ; GCN-NEXT:    s_or_b32 s1, s1, 1
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s1
 ; GCN-NEXT:    v_mad_f32 v0, -v1, s2, v0
@@ -1153,6 +2118,35 @@ define amdgpu_kernel void @s_test_srem24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_srem24_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s2, 0x46b6fe00
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 40
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 30
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s0
+; GCN-IR-NEXT:    s_or_b32 s1, s1, 1
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-IR-NEXT:    v_mad_f32 v0, -v1, s2, v0
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s2
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, 0, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    s_movk_i32 s1, 0x5b7f
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = ashr i64 %x, 40
   %result = srem i64 %x.shr, 23423
   store i64 %result, i64 addrspace(1)* %out
@@ -1165,22 +2159,44 @@ define i64 @v_test_srem24_k_num_i64(i64 %x) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
 ; GCN-NEXT:    s_mov_b32 s4, 0x41c00000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, v0
-; GCN-NEXT:    v_ashrrev_i32_e32 v2, 30, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v1
-; GCN-NEXT:    v_or_b32_e32 v2, 1, v2
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v2, v0
+; GCN-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v2
 ; GCN-NEXT:    v_mul_f32_e32 v3, s4, v3
 ; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mad_f32 v4, -v3, v1, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v1|
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v3
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-NEXT:    v_mad_f32 v4, -v3, v2, s4
+; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v2|
+; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
 ; GCN-NEXT:    v_mul_lo_u32 v0, v1, v0
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x41c00000
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v3, v2
+; GCN-IR-NEXT:    v_mul_f32_e32 v3, s4, v3
+; GCN-IR-NEXT:    v_trunc_f32_e32 v3, v3
+; GCN-IR-NEXT:    v_mad_f32 v4, -v3, v2, s4
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v2|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = ashr i64 %x, 40
   %result = srem i64 24, %x.shr
   ret i64 %result
@@ -1192,22 +2208,44 @@ define i64 @v_test_srem24_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
 ; GCN-NEXT:    s_mov_b32 s4, 0x47000000
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, v0
-; GCN-NEXT:    v_ashrrev_i32_e32 v2, 30, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v1
-; GCN-NEXT:    v_or_b32_e32 v2, 1, v2
+; GCN-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-NEXT:    v_cvt_f32_i32_e32 v2, v0
+; GCN-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v2
 ; GCN-NEXT:    v_mul_f32_e32 v3, s4, v3
 ; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mad_f32 v4, -v3, v1, s4
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v1|
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v3
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-NEXT:    v_mad_f32 v4, -v3, v2, s4
+; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v2|
+; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
 ; GCN-NEXT:    v_mul_lo_u32 v0, v1, v0
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
 ; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 24
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem24_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v3, v2
+; GCN-IR-NEXT:    v_mul_f32_e32 v3, s4, v3
+; GCN-IR-NEXT:    v_trunc_f32_e32 v3, v3
+; GCN-IR-NEXT:    v_mad_f32 v4, -v3, v2, s4
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v2|
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = ashr i64 %x, 40
   %result = srem i64 32768, %x.shr
   ret i64 %result
@@ -1225,6 +2263,27 @@ define i64 @v_test_srem24_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_srem24_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_ashr_i64 v[0:1], v[0:1], 40
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 30, v0
+; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, 1, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v3, 0x38000000, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v3, v3
+; GCN-IR-NEXT:    v_mad_f32 v2, -v3, s4, v2
+; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, s4
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
+; GCN-IR-NEXT:    v_lshlrev_b32_e32 v1, 15, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 24
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = ashr i64 %x, 40
   %result = srem i64 %x.shr, 32768
   ret i64 %result

diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index c4795f1769de..375fdc6163aa 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
 
 define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_udiv_i64:
@@ -123,6 +124,95 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s2
+; GCN-IR-NEXT:    s_add_i32 s11, s0, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s3
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    s_add_i32 s8, s10, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[6:7], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[6:7], v0
+; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s7
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s6, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s2, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s3, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB0_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = udiv i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -241,6 +331,89 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v13, v11, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v5, v1, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v7, v1
+; GCN-IR-NEXT:    s_or_b64 s[6:7], vcc, s[4:5]
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 32, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 32, v6
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v4, v5, v4, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v5, v7, v6, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v4, v5
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v1, 0, s[6:7]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[6:7], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[6:7]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 63, v6
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[4:5], v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[12:13], v[0:1], v4
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:  BB1_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, -1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, -1, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v9, v11, v9
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v10, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v6, s[4:5], v1, v13, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v13, vcc, v13, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB1_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB1_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[8:9], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v6, v0
+; GCN-IR-NEXT:  BB1_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, v4
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = udiv i64 %x, %y
   ret i64 %result
 }
@@ -249,40 +422,52 @@ define amdgpu_kernel void @s_test_udiv24_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_udiv24_64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_lshr_b32 s3, s7, 8
-; GCN-NEXT:    s_lshr_b32 s2, s2, 8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s2
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s3, v1
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_lshr_b32 s4, s7, 8
+; GCN-NEXT:    s_lshr_b32 s5, s8, 8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv24_64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 8
+; GCN-IR-NEXT:    s_lshr_b32 s5, s8, 8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 40
   %2 = lshr i64 %y, 40
   %result = udiv i64 %1, %2
@@ -296,31 +481,36 @@ define i64 @v_test_udiv24_i64(i64 %x, i64 %y) {
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
 ; GCN-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, v1
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x4f800000, v2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_hi_u32 v3, v2, v1
-; GCN-NEXT:    v_mul_lo_u32 v4, v2, v1
-; GCN-NEXT:    v_sub_i32_e32 v5, vcc, 0, v4
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
-; GCN-NEXT:    v_mul_hi_u32 v3, v3, v2
-; GCN-NEXT:    v_add_i32_e64 v4, s[4:5], v2, v3
-; GCN-NEXT:    v_sub_i32_e64 v2, s[4:5], v2, v3
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, v2, v1
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, 1, v2
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, -1, v2
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
-; GCN-NEXT:    v_sub_i32_e64 v0, s[4:5], v0, v3
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v1
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v4, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv24_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v1, 8, v3
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %1 = lshr i64 %x, 40
   %2 = lshr i64 %y, 40
   %result = udiv i64 %1, %2
@@ -331,38 +521,46 @@ define amdgpu_kernel void @s_test_udiv32_i64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_udiv32_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s7
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s2
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s7, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s7, v1
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s7
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv32_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s7
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s8
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 32
   %2 = lshr i64 %y, 32
   %result = udiv i64 %1, %2
@@ -374,40 +572,52 @@ define amdgpu_kernel void @s_test_udiv31_i64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_udiv31_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_lshr_b32 s3, s7, 1
-; GCN-NEXT:    s_lshr_b32 s2, s2, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s2
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s3, v1
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_lshr_b32 s4, s7, 1
+; GCN-NEXT:    s_lshr_b32 s5, s8, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv31_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 1
+; GCN-IR-NEXT:    s_lshr_b32 s5, s8, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 33
   %2 = lshr i64 %y, 33
   %result = udiv i64 %1, %2
@@ -422,24 +632,49 @@ define amdgpu_kernel void @s_test_udiv23_i64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, -1
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_mov_b32 s0, s4
 ; GCN-NEXT:    s_mov_b32 s1, s5
 ; GCN-NEXT:    s_lshr_b32 s4, s7, 9
 ; GCN-NEXT:    s_lshr_b32 s5, s8, 9
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v0
-; GCN-NEXT:    v_mul_f32_e32 v3, v2, v3
-; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mad_f32 v2, -v3, v0, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v0|
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
 ; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffff, v0
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv23_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 9
+; GCN-IR-NEXT:    s_lshr_b32 s5, s8, 9
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x7fffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 41
   %2 = lshr i64 %y, 41
   %result = udiv i64 %1, %2
@@ -570,6 +805,107 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-NEXT:    buffer_store_short v0, off, s[4:7], 0 offset:4
 ; GCN-NEXT:    buffer_store_dword v1, off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv24_i48:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
+; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dword s7, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s8, 0xffff
+; GCN-IR-NEXT:    s_mov_b32 s9, 0xff000000
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_and_b32 s1, s3, s8
+; GCN-IR-NEXT:    s_and_b32 s0, s2, s9
+; GCN-IR-NEXT:    s_and_b32 s3, s7, s8
+; GCN-IR-NEXT:    s_and_b32 s2, s6, s9
+; GCN-IR-NEXT:    s_lshr_b64 s[6:7], s[0:1], 24
+; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], 24
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_add_i32 s8, s10, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    s_add_i32 s9, s12, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s9
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB7_7
+; GCN-IR-NEXT:  BB7_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[6:7], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB7_6
+; GCN-IR-NEXT:  BB7_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[6:7], v0
+; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB7_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s7
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s6, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s2, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s3, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
+; GCN-IR-NEXT:  BB7_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB7_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_short v1, off, s[4:7], 0 offset:4
+; GCN-IR-NEXT:    buffer_store_dword v0, off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i48 %x, 24
   %2 = lshr i48 %y, 24
   %result = udiv i48 %1, %2
@@ -689,6 +1025,84 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffc5, v0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[2:3], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB8_7
+; GCN-IR-NEXT:  BB8_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], 24, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB8_6
+; GCN-IR-NEXT:  BB8_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v0
+; GCN-IR-NEXT:    s_add_u32 s3, s6, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s8, s7, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB8_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s3, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s6, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s7, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
+; GCN-IR-NEXT:  BB8_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB8_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, s2
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = udiv i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -798,6 +1212,88 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v7, v8, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, 0, v1, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
+; GCN-IR-NEXT:    s_mov_b32 s10, 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, s6, v2
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s10
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v2, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB9_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[10:11], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB9_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[12:13], s[4:5], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:  BB9_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v8, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v6, s[4:5], v9, v13, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v13, vcc, v13, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB9_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB9_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v7, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v6, v0
+; GCN-IR-NEXT:  BB9_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, v2
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = udiv i64 32768, %x
   ret i64 %result
 }
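
The GCN-IR check blocks above (%_udiv-special-cases, %udiv-bb1, %udiv-preheader,
%udiv-do-while, %udiv-loop-exit) trace classic restoring shift-subtract division:
the special-case block peels off zero operands and out-of-range shift counts using
the s_flbit_i32_b32/v_ffbh_u32 leading-zero counts, and the loop then retires one
quotient bit per iteration. As an orientation aid for reading the checks, here is
a minimal C sketch of that algorithm; it is not a replica of the emitted IR, which
sizes the loop by the clz difference of the operands rather than always iterating
64 times:

#include <stdint.h>

/* Minimal sketch of restoring shift-subtract division, the algorithm the
   %udiv-do-while loop implements one quotient bit per iteration.
   Assumes d != 0; the emitted IR handles that in %_udiv-special-cases. */
static uint64_t udiv64_sketch(uint64_t n, uint64_t d, uint64_t *rem) {
  uint64_t q = 0, r = 0;
  for (int i = 63; i >= 0; --i) {
    r = (r << 1) | ((n >> i) & 1); /* shift the next numerator bit into r */
    if (r >= d) {                  /* the sign test on (d - 1) - r below  */
      r -= d;                      /* conditional subtract of the divisor */
      q |= 1ull << i;              /* record a 1 quotient bit             */
    }
  }
  if (rem)
    *rem = r;
  return q;
}

In the checks, the r >= d test is branch-free: a v_sub/v_subb pair computes
(d - 1) - r, v_ashrrev_i32 ... 31 broadcasts its sign bit into a mask, and the
v_and instructions derive both the quotient bit and the value conditionally
subtracted from the running remainder.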
@@ -809,6 +1305,81 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_alignbit_b32 v0, v1, v0, 15
 ; GCN-NEXT:    v_lshrrev_b32_e32 v1, 15, v1
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v2
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[4:5], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB10_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB10_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:  BB10_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v1, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v0, v6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s12, v8
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v0, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v0, 31, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x8000, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v10, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, v5
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB10_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB10_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[6:7], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
+; GCN-IR-NEXT:  BB10_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, v2
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = udiv i64 %x, 32768
   ret i64 %result
 }
@@ -921,6 +1492,81 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv_k_den_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s7
+; GCN-IR-NEXT:    s_add_i32 s3, s0, 32
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 59, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[2:3], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_cbranch_vccz BB11_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB11_7
+; GCN-IR-NEXT:  BB11_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[6:7], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB11_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB11_6
+; GCN-IR-NEXT:  BB11_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[6:7], v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB11_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], 23, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], 0, v9, s[0:1]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v6
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[0:1], 0, v9, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB11_5
+; GCN-IR-NEXT:  BB11_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB11_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = udiv i64 %x, 24
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -1027,6 +1673,79 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v10, v8, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv_k_den_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v2
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[4:5], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB12_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB12_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:  BB12_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v1, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v0, v6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 23, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v0, s[4:5], 0, v9, s[4:5]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v0, 31, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 24, v0
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v0
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, vcc, 0, v9, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, v5
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB12_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB12_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[6:7], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
+; GCN-IR-NEXT:  BB12_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, v2
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = udiv i64 %x, 24
   ret i64 %result
 }
@@ -1038,35 +1757,45 @@ define amdgpu_kernel void @s_test_udiv24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s2, 0x41c00000
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    s_lshr_b32 s2, s3, 8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s2
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_gt_u32_e64 s[0:1], 25, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, 24, v1
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
+; GCN-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, s2, v1
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mad_f32 v2, -v1, v0, s2
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s2, 0x41c00000
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, s2, v1
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v2, -v1, v0, s2
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = lshr i64 %x, 40
   %result = udiv i64 24, %x.shr
   store i64 %result, i64 addrspace(1)* %out
@@ -1080,37 +1809,43 @@ define amdgpu_kernel void @s_test_udiv24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_movk_i32 s2, 0x5b7f
-; GCN-NEXT:    s_movk_i32 s8, 0x5b7e
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
+; GCN-NEXT:    s_mov_b32 s2, 0x46b6fe00
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    s_lshr_b32 s3, s3, 8
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
-; GCN-NEXT:    v_mul_lo_u32 v1, v0, s2
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, -1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v1
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s3, v1
-; GCN-NEXT:    v_cmp_lt_u32_e32 vcc, s8, v1
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
+; GCN-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mad_f32 v0, -v1, s2, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_udiv24_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s2, 0x46b6fe00
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v0, -v1, s2, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s2
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = lshr i64 %x, 40
   %result = udiv i64 %x.shr, 23423
   store i64 %result, i64 addrspace(1)* %out
@@ -1122,31 +1857,35 @@ define i64 @v_test_udiv24_k_num_i64(i64 %x) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f800000, v1
+; GCN-NEXT:    s_mov_b32 s4, 0x41c00000
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, s4, v1
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mad_f32 v2, -v1, v0, s4
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, v1, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v0
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v3
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, v1
-; GCN-NEXT:    v_add_i32_e64 v3, s[4:5], v1, v2
-; GCN-NEXT:    v_sub_i32_e64 v1, s[4:5], v1, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, -1, v1
-; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 25, v2
-; GCN-NEXT:    v_sub_i32_e64 v2, s[4:5], 24, v2
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v0
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v1, v3, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x41c00000
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, s4, v1
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v2, -v1, v0, s4
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = lshr i64 %x, 40
   %result = udiv i64 24, %x.shr
   ret i64 %result
@@ -1157,32 +1896,35 @@ define i64 @v_test_udiv24_pow2_k_num_i64(i64 %x) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
-; GCN-NEXT:    s_mov_b32 s6, 0x8001
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f800000, v1
+; GCN-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, s4, v1
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mad_f32 v2, -v1, v0, s4
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, v1, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v0
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v3
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, v1
-; GCN-NEXT:    v_add_i32_e64 v3, s[4:5], v1, v2
-; GCN-NEXT:    v_sub_i32_e64 v1, s[4:5], v1, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; GCN-NEXT:    v_lshrrev_b32_e32 v1, 17, v1
-; GCN-NEXT:    v_mul_u32_u24_e32 v2, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, -1, v1
-; GCN-NEXT:    v_cmp_gt_u32_e64 s[4:5], s6, v2
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0x8000, v2
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v0
-; GCN-NEXT:    s_and_b64 vcc, vcc, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v4, v0, s[4:5]
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv24_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, s4, v1
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v2, -v1, v0, s4
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = lshr i64 %x, 40
   %result = udiv i64 32768, %x.shr
   ret i64 %result
@@ -1195,6 +1937,22 @@ define i64 @v_test_udiv24_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 23, v1
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_udiv24_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x38000000, v0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v0, -v1, s4, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s4
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = lshr i64 %x, 40
   %result = udiv i64 %x.shr, 32768
   ret i64 %result
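
The udiv24-style tests above never reach the loop: once both operands are known
to fit in 24 bits, the pass emits the float-reciprocal sequence visible in the
checks (v_rcp_iflag_f32, v_mul_f32, v_trunc_f32, v_mad_f32, then a
compare-and-carry rounding fix-up and a 24-bit mask). A hedged C sketch of the
idea, using an exact 1.0f / d in place of the hardware's approximate reciprocal:

#include <math.h>
#include <stdint.h>

/* Sketch of the 24-bit udiv expansion in the checks above. Exact for
   n, d < 2^24 (both convert to float without rounding); the real code
   uses v_rcp_iflag_f32, an approximate reciprocal, hence the fix-up. */
static uint32_t udiv24_sketch(uint32_t n, uint32_t d) {
  float nf = (float)n, df = (float)d;
  float qf = truncf(nf * (1.0f / df));          /* v_rcp, v_mul, v_trunc  */
  float r = nf - qf * df;                       /* v_mad_f32 v2, -q, d, n */
  uint32_t q = (uint32_t)qf + (fabsf(r) >= df); /* v_cmp_ge_f32 + v_addc  */
  return q & 0xffffff;                          /* v_and 0xffffff         */
}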

diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 220f6ad57dde..845d862eb0db 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -amdgpu-codegenprepare-expand-div64 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-IR %s
 
 define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_urem_i64:
@@ -122,6 +123,106 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s2
+; GCN-IR-NEXT:    s_add_i32 s11, s0, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s3
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    s_add_i32 s8, s10, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[6:7], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[6:7], v0
+; GCN-IR-NEXT:    s_add_u32 s8, s2, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s9, s3, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s9
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s8, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s2, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s3, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB0_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s11, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s10, -1
+; GCN-IR-NEXT:    s_mov_b32 s8, s4
+; GCN-IR-NEXT:    s_mov_b32 s9, s5
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, s7
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -239,6 +340,95 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v2, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_urem_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v7, v1
+; GCN-IR-NEXT:    s_or_b64 s[6:7], vcc, s[4:5]
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 32, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 32, v6
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v4, v5, v4, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v5, v7, v6, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v4, v5
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v1, 0, s[6:7]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[6:7], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v0, 0, s[6:7]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 63, v6
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[4:5], v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB1_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[14:15], v[0:1], v4
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:  BB1_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[14:15], v[14:15], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v9
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, -1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, -1, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v14, v14, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v9, v13, v9
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v12, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v10, v14
+; GCN-IR-NEXT:    v_subb_u32_e64 v6, s[4:5], v11, v15, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, v14, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v15, vcc, v15, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB1_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB1_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[8:9], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:  BB1_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mul_lo_u32 v5, v2, v5
+; GCN-IR-NEXT:    v_mul_hi_u32 v6, v2, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v6, v5
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = urem i64 %x, %y
   ret i64 %result
 }
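
Unlike the s_test_* kernels, which branch with uniform s_cbranch_vccz/vccnz
control flow, this arbitrary-operand version guards the loop with exec-mask
save/restore (s_and_saveexec_b64, s_andn2_b64 exec, and the %Flow blocks that
re-or the mask). The tail after BB1_6 then turns the quotient back into a
remainder, rem = n - q * d, assembling the low 64 bits of the product from
32-bit partial products exactly as the v_mul_lo_u32/v_mul_hi_u32 sequence
shows. A hedged C equivalent of that tail (names are illustrative):

#include <stdint.h>

/* Remainder from the quotient produced by the shift-subtract loop:
   rem = n - q * d, with the 64-bit product built the same way as the
   v_mul_lo/v_mul_hi/v_add/v_sub/v_subb tail above. */
static uint64_t urem64_tail(uint64_t n, uint64_t d, uint64_t q) {
  uint32_t q_lo = (uint32_t)q, q_hi = (uint32_t)(q >> 32);
  uint32_t d_lo = (uint32_t)d, d_hi = (uint32_t)(d >> 32);
  /* Low 64 bits of q*d: full d_lo*q_lo plus the two cross terms, whose
     low halves land in the high word (their high halves overflow out). */
  uint64_t prod = (uint64_t)d_lo * q_lo +
                  (((uint64_t)d_lo * q_hi + (uint64_t)d_hi * q_lo) << 32);
  return n - prod; /* the v_sub_i32 / v_subb_u32 pair */
}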
@@ -247,40 +437,56 @@ define amdgpu_kernel void @s_test_urem31_i64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_urem31_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_lshr_b32 s3, s7, 1
-; GCN-NEXT:    s_lshr_b32 s4, s2, 1
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_lshr_b32 s4, s7, 1
+; GCN-NEXT:    s_lshr_b32 s5, s8, 1
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s4
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s4
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s4, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s4, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s4, v1
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem31_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 1
+; GCN-IR-NEXT:    s_lshr_b32 s5, s8, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s5
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 33
   %2 = lshr i64 %y, 33
   %result = urem i64 %1, %2
@@ -291,66 +497,87 @@ define amdgpu_kernel void @s_test_urem31_i64(i64 addrspace(1)* %out, i64 %x, i64
 define amdgpu_kernel void @s_test_urem31_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
 ; GCN-LABEL: s_test_urem31_v2i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x9
-; GCN-NEXT:    s_load_dwordx4 s[12:15], s[0:1], 0xd
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x11
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x11
+; GCN-NEXT:    s_mov_b32 s15, 0xf000
+; GCN-NEXT:    s_mov_b32 s14, -1
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_brev_b32 s0, -2
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshr_b32 s4, s13, 1
-; GCN-NEXT:    s_lshr_b32 s6, s15, 1
-; GCN-NEXT:    s_lshr_b32 s12, s5, 1
-; GCN-NEXT:    s_lshr_b32 s5, s7, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s12
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x4f800000, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v4, v0, s5
-; GCN-NEXT:    v_mul_hi_u32 v5, v2, s12
-; GCN-NEXT:    v_mul_lo_u32 v6, v2, s12
-; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 0, v4
-; GCN-NEXT:    v_sub_i32_e32 v8, vcc, 0, v6
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v7, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], 0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v8, s[2:3]
-; GCN-NEXT:    v_mul_hi_u32 v3, v3, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, v4, v2
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v3, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v3, v0
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v2
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, v4, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v5, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[2:3]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s6
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, s4
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, s12
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s6, v0
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s4, v2
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, s5, v3
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s6, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s5, v3
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s5, v3
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, s12, v4
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], s4, v2
-; GCN-NEXT:    v_cmp_le_u32_e64 s[6:7], s12, v4
-; GCN-NEXT:    v_subrev_i32_e32 v2, vcc, s12, v4
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GCN-NEXT:    s_and_b64 vcc, s[6:7], s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v4, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v0, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v6, v3, s[4:5]
+; GCN-NEXT:    s_lshr_b32 s1, s7, 1
+; GCN-NEXT:    s_lshr_b32 s2, s5, 1
+; GCN-NEXT:    s_lshr_b32 s3, s11, 1
+; GCN-NEXT:    s_lshr_b32 s4, s9, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s3
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v2
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v6, v4
+; GCN-NEXT:    v_mul_f32_e32 v5, v0, v5
+; GCN-NEXT:    v_mul_f32_e32 v6, v3, v6
+; GCN-NEXT:    v_trunc_f32_e32 v5, v5
+; GCN-NEXT:    v_trunc_f32_e32 v6, v6
+; GCN-NEXT:    v_mad_f32 v0, -v5, v2, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v5, v5
+; GCN-NEXT:    v_mad_f32 v3, -v6, v4, v3
+; GCN-NEXT:    v_cvt_u32_f32_e32 v6, v6
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v5, vcc
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, s3
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s1, v2
+; GCN-NEXT:    v_and_b32_e32 v0, s0, v0
+; GCN-NEXT:    v_and_b32_e32 v2, s0, v2
 ; GCN-NEXT:    v_mov_b32_e32 v3, v1
-; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem31_v2i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x11
+; GCN-IR-NEXT:    s_mov_b32 s15, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s14, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_brev_b32 s0, -2
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_lshr_b32 s1, s7, 1
+; GCN-IR-NEXT:    s_lshr_b32 s2, s5, 1
+; GCN-IR-NEXT:    s_lshr_b32 s3, s11, 1
+; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v3, s1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v4, s3
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v5, v2
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v6, v4
+; GCN-IR-NEXT:    v_mul_f32_e32 v5, v0, v5
+; GCN-IR-NEXT:    v_mul_f32_e32 v6, v3, v6
+; GCN-IR-NEXT:    v_trunc_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_trunc_f32_e32 v6, v6
+; GCN-IR-NEXT:    v_mad_f32 v0, -v5, v2, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_mad_f32 v3, -v6, v4, v3
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v6, v6
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, s3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s1, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v0, s0, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v2, s0, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-IR-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr <2 x i64> %x, <i64 33, i64 33>
   %2 = lshr <2 x i64> %y, <i64 33, i64 33>
   %result = urem <2 x i64> %1, %2
@@ -362,40 +589,56 @@ define amdgpu_kernel void @s_test_urem24_i64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-LABEL: s_test_urem24_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s8, s4
-; GCN-NEXT:    s_mov_b32 s9, s5
-; GCN-NEXT:    s_lshr_b32 s3, s7, 8
-; GCN-NEXT:    s_lshr_b32 s4, s2, 8
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_lshr_b32 s4, s7, 8
+; GCN-NEXT:    s_lshr_b32 s5, s8, 8
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s4
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s4
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s4, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s4, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s4, v1
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s5
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem24_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 8
+; GCN-IR-NEXT:    s_lshr_b32 s5, s8, 8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, v0, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s5
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s4, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 40
   %2 = lshr i64 %y, 40
   %result = urem i64 %1, %2
@@ -406,55 +649,87 @@ define amdgpu_kernel void @s_test_urem24_i64(i64 addrspace(1)* %out, i64 %x, i64
 define amdgpu_kernel void @s_test_urem23_64_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %x, <2 x i64> %y) {
 ; GCN-LABEL: s_test_urem23_64_v2i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0xd
-; GCN-NEXT:    s_load_dwordx4 s[12:15], s[0:1], 0x11
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GCN-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x11
+; GCN-NEXT:    s_mov_b32 s15, 0xf000
+; GCN-NEXT:    s_mov_b32 s14, -1
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_brev_b32 s0, -2
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshr_b32 s2, s11, 9
-; GCN-NEXT:    s_lshr_b32 s3, s9, 1
-; GCN-NEXT:    s_lshr_b32 s8, s15, 9
-; GCN-NEXT:    s_lshr_b32 s9, s13, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s9
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s8
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v3
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_mul_f32_e32 v4, v2, v4
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_trunc_f32_e32 v4, v4
-; GCN-NEXT:    v_mul_hi_u32 v5, v0, s9
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, s9
-; GCN-NEXT:    v_mad_f32 v2, -v4, v3, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 0, v6
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, |v3|
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v4, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v6, v7, s[0:1]
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, s8
-; GCN-NEXT:    v_mul_hi_u32 v3, v3, v0
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s2, v2
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, v3, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v3, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v4, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s9
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v4, vcc, s9, v3
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v3
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s9, v3
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v4, v0, s[0:1]
-; GCN-NEXT:    v_and_b32_e32 v2, 0x7fffff, v2
+; GCN-NEXT:    s_lshr_b32 s1, s7, 9
+; GCN-NEXT:    s_lshr_b32 s2, s5, 1
+; GCN-NEXT:    s_lshr_b32 s3, s11, 9
+; GCN-NEXT:    s_lshr_b32 s4, s9, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s3
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v5, v2
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v6, v4
+; GCN-NEXT:    v_mul_f32_e32 v5, v0, v5
+; GCN-NEXT:    v_mul_f32_e32 v6, v3, v6
+; GCN-NEXT:    v_trunc_f32_e32 v5, v5
+; GCN-NEXT:    v_trunc_f32_e32 v6, v6
+; GCN-NEXT:    v_mad_f32 v0, -v5, v2, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v5, v5
+; GCN-NEXT:    v_mad_f32 v3, -v6, v4, v3
+; GCN-NEXT:    v_cvt_u32_f32_e32 v6, v6
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v2
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v5, vcc
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GCN-NEXT:    v_mul_lo_u32 v2, v2, s3
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s1, v2
+; GCN-NEXT:    v_and_b32_e32 v0, s0, v0
+; GCN-NEXT:    v_and_b32_e32 v2, s0, v2
 ; GCN-NEXT:    v_mov_b32_e32 v3, v1
-; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem23_64_v2i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x11
+; GCN-IR-NEXT:    s_mov_b32 s15, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s14, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_brev_b32 s0, -2
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_lshr_b32 s1, s7, 9
+; GCN-IR-NEXT:    s_lshr_b32 s2, s5, 1
+; GCN-IR-NEXT:    s_lshr_b32 s3, s11, 9
+; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v3, s1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v4, s3
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v5, v2
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v6, v4
+; GCN-IR-NEXT:    v_mul_f32_e32 v5, v0, v5
+; GCN-IR-NEXT:    v_mul_f32_e32 v6, v3, v6
+; GCN-IR-NEXT:    v_trunc_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_trunc_f32_e32 v6, v6
+; GCN-IR-NEXT:    v_mad_f32 v0, -v5, v2, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v5, v5
+; GCN-IR-NEXT:    v_mad_f32 v3, -v6, v4, v3
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v6, v6
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s4
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, s3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s2, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s1, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v0, s0, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v2, s0, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-IR-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
+; GCN-IR-NEXT:    s_endpgm
   %1 = lshr <2 x i64> %x, <i64 33, i64 41>
   %2 = lshr <2 x i64> %y, <i64 33, i64 41>
   %result = urem <2 x i64> %1, %2
@@ -573,6 +848,93 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v0, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffc5, v0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[2:3], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_cbranch_vccz BB6_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB6_7
+; GCN-IR-NEXT:  BB6_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], 24, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB6_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB6_6
+; GCN-IR-NEXT:  BB6_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v0
+; GCN-IR-NEXT:    s_add_u32 s3, s6, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_addc_u32 s8, s7, -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB6_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], s3, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], v10, v9, s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v7, s6, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, s7, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v7
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[0:1], v9, v6, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB6_5
+; GCN-IR-NEXT:  BB6_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB6_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s6, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s7, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s6, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
   ret void
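
An aside on reading the GCN-IR checks above: the %_udiv-special-cases,
%udiv-bb1, %udiv-preheader and %udiv-do-while labels come from the shared
IntegerDivision expansion, which is a plain restoring shift-subtract
divider. Below is a minimal C sketch of what the expanded IR computes; it
is my reading of the checks, not the pass's output, and the real expansion
also narrows the trip count using the leading-zero counts (the
s_flbit_i32_b32/v_ffbh_u32 instructions) and peels off the special cases.

  #include <stdint.h>

  /* Sketch only: assumes den != 0 and ignores the special-case and
     trip-count-narrowing blocks. */
  static uint64_t udiv64_sketch(uint64_t num, uint64_t den) {
    uint64_t quot = 0, rem = 0;
    for (int i = 63; i >= 0; --i) {
      rem = (rem << 1) | ((num >> i) & 1); /* shift in next numerator bit */
      if (rem >= den) {                    /* the compare/select in the loop */
        rem -= den;
        quot |= 1ull << i;
      }
    }
    return quot;
  }

  /* urem is then recovered as n - (n/d)*d, matching the v_mul_lo_u32 /
     v_sub_i32 tail in the %udiv-end block. */
  static uint64_t urem64_sketch(uint64_t num, uint64_t den) {
    return num - udiv64_sketch(num, den) * den;
  }
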
@@ -684,6 +1046,90 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem_k_den_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s7
+; GCN-IR-NEXT:    s_add_i32 s3, s0, 32
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 59, v0
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[2:3], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[0:1], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_2
+; GCN-IR-NEXT:  ; %bb.1:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB7_7
+; GCN-IR-NEXT:  BB7_2: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[0:1], v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, 63, v2
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[6:7], v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
+; GCN-IR-NEXT:  ; %bb.3:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_branch BB7_6
+; GCN-IR-NEXT:  BB7_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[6:7], v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB7_5: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v5
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v7, v5
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[0:1], 23, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v2, s[0:1], 0, v9, s[0:1]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[0:1], v8, v6
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[0:1], 0, v9, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
+; GCN-IR-NEXT:  BB7_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
+; GCN-IR-NEXT:  BB7_7: ; %udiv-end
+; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_mov_b32 s0, s4
+; GCN-IR-NEXT:    s_mov_b32 s1, s5
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, 24
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, v0, 24
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, 24
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, s7
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v3, v1, vcc
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 %x, 24
   store i64 %result, i64 addrspace(1)* %out
   ret void
@@ -793,6 +1239,94 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[4:5]
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_urem_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
+; GCN-IR-NEXT:    s_mov_b32 s10, 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
+; GCN-IR-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, s6, v2
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s10
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v2, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB8_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[10:11], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB8_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_mov_b32 s5, 0
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[12:13], s[4:5], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:  BB8_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v8, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v6, s[4:5], v9, v13, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v13, vcc, v13, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB8_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB8_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:  BB8_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GCN-IR-NEXT:    v_mul_hi_u32 v4, v0, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v2
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v4, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
+; GCN-IR-NEXT:    v_add_i32_e64 v1, s[4:5], v2, v1
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = urem i64 32768, %x
   ret i64 %result
 }
@@ -804,6 +1338,82 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_urem_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[4:5]
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v2
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v1, 0, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[4:5], -1
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v0, 0, s[4:5]
+; GCN-IR-NEXT:    s_and_b64 s[4:5], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB9_6
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[2:3], v[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], exec, s[4:5]
+; GCN-IR-NEXT:    s_cbranch_execz BB9_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:  BB9_3: ; %udiv-do-while
+; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v7
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v9, v7
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v8, v6
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[2:3]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v10
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[4:5], s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v10, vcc, v10, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v11, vcc, v11, v12, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_execnz BB9_3
+; GCN-IR-NEXT:  ; %bb.4: ; %Flow
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  BB9_5: ; %Flow1
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v5, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v4, v2
+; GCN-IR-NEXT:  BB9_6: ; %Flow2
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 15
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = urem i64 %x, 32768
   ret i64 %result
 }
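
A note on the two pow2 tests: for an unsigned remainder by a power of two,
x % 2^k equals x & (2^k - 1), which is why the plain GCN output above is a
single v_and with 0x7fff while the GCN-IR path runs the generic loop and
rebuilds the remainder as x - (q << 15) in the BB9_6 tail. A small
standalone check of that identity (a sketch, not part of the test):

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    /* x % 32768 via mask (the DAG path's v_and_b32 0x7fff) versus
       x - (x / 32768) * 32768 (the expanded path's reconstruction);
       the two must agree for every x. */
    for (uint64_t x = 0; x < (1u << 22); x += 9973)
      assert((x & 0x7fff) == x - ((x >> 15) << 15));
    return 0;
  }
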
@@ -815,35 +1425,49 @@ define amdgpu_kernel void @s_test_urem24_k_num_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_mov_b32 s2, 0x41c00000
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    s_lshr_b32 s8, s3, 8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s8
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s8
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s8
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, 24, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s8, v1
-; GCN-NEXT:    v_cmp_gt_u32_e64 s[0:1], 25, v0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v1
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; GCN-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, s2, v1
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mad_f32 v2, -v1, v0, s2
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s0
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_mov_b32 s2, 0x41c00000
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v1, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, s2, v1
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v2, -v1, v0, s2
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = lshr i64 %x, 40
   %result = urem i64 24, %x.shr
   store i64 %result, i64 addrspace(1)* %out
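
For the urem24 cases, both check prefixes now share the short f32 sequence
rather than the previous 0x4f800000-based reciprocal refinement; the
0x41c00000 constant is simply 24.0f. Roughly, in C (hedged:
v_rcp_iflag_f32 is an approximate reciprocal, modeled here with an exact
divide, and both inputs are assumed to fit in 24 bits):

  #include <math.h>
  #include <stdint.h>

  /* Sketch of the urem24 lowering in the checks above: form a truncated
     f32 quotient, correct it by at most one, then mask to 24 bits. */
  static uint32_t urem24_sketch(uint32_t num, uint32_t den) {
    float fn = (float)num;               /* 24.0f (0x41c00000) in k_num */
    float fd = (float)den;
    float fq = truncf(fn * (1.0f / fd)); /* v_rcp_iflag + v_mul + v_trunc */
    float r  = -fq * fd + fn;            /* v_mad_f32: residual of guess */
    uint32_t q = (uint32_t)fq + (fabsf(r) >= fd); /* v_cmp_ge + v_addc */
    return (num - q * den) & 0xffffff;   /* v_mul_lo + v_sub + v_and */
  }
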
@@ -856,38 +1480,50 @@ define amdgpu_kernel void @s_test_urem24_k_den_i64(i64 addrspace(1)* %out, i64 %
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_mov_b32 s8, 0x46b6fe00
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_movk_i32 s2, 0x5b7f
-; GCN-NEXT:    s_movk_i32 s8, 0x5b7e
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    s_lshr_b32 s3, s3, 8
-; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_hi_u32 v1, v0, s2
-; GCN-NEXT:    v_mul_lo_u32 v2, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v2
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], 0, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v1, v0
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s3
+; GCN-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_mad_f32 v0, -v1, s8, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s8
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_mul_lo_u32 v0, v0, s2
-; GCN-NEXT:    v_sub_i32_e32 v1, vcc, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, s2, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[0:1], s3, v0
-; GCN-NEXT:    v_cmp_lt_u32_e64 s[2:3], s8, v1
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, 0xffffa481, v1
-; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
+;
+; GCN-IR-LABEL: s_test_urem24_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_mov_b32 s8, 0x46b6fe00
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_movk_i32 s2, 0x5b7f
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_lshr_b32 s0, s3, 8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x38331158, v0
+; GCN-IR-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_mad_f32 v0, -v1, s8, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, s8
+; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s0, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-IR-NEXT:    s_endpgm
   %x.shr = lshr i64 %x, 40
   %result = urem i64 %x.shr, 23423
   store i64 %result, i64 addrspace(1)* %out
@@ -899,31 +1535,39 @@ define i64 @v_test_urem24_k_num_i64(i64 %x) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-NEXT:    s_mov_b32 s4, 0x41c00000
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f800000, v1
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, v1, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v0
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v3
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, v1
-; GCN-NEXT:    v_add_i32_e64 v3, s[4:5], v1, v2
-; GCN-NEXT:    v_sub_i32_e64 v1, s[4:5], v1, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
-; GCN-NEXT:    v_mul_lo_u32 v1, v1, v0
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 24, v1
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v2, v0
-; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 25, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v0
-; GCN-NEXT:    v_sub_i32_e64 v0, s[6:7], v2, v0
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, s4, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v3, -v2, v1, s4
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_urem24_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x41c00000
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, v0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, s4, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v3, -v2, v1, s4
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = lshr i64 %x, 40
   %result = urem i64 24, %x.shr
   ret i64 %result
@@ -934,32 +1578,39 @@ define i64 @v_test_urem24_pow2_k_num_i64(i64 %x) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
-; GCN-NEXT:    s_mov_b32 s6, 0x8001
+; GCN-NEXT:    s_mov_b32 s4, 0x47000000
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f800000, v1
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, v1, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v0
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v3
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
-; GCN-NEXT:    v_mul_hi_u32 v2, v2, v1
-; GCN-NEXT:    v_add_i32_e64 v3, s[4:5], v1, v2
-; GCN-NEXT:    v_sub_i32_e64 v1, s[4:5], v1, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; GCN-NEXT:    v_lshrrev_b32_e32 v1, 17, v1
-; GCN-NEXT:    v_mul_u32_u24_e32 v1, v1, v0
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0x8000, v1
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v2, v0
-; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, s6, v1
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v0
-; GCN-NEXT:    v_sub_i32_e64 v0, s[6:7], v2, v0
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-NEXT:    v_mul_f32_e32 v2, s4, v2
+; GCN-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-NEXT:    v_mad_f32 v3, -v2, v1, s4
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; GCN-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
+; GCN-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_urem24_pow2_k_num_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, v0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v1
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, s4, v2
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v3, -v2, v1, s4
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v1, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = lshr i64 %x, 40
   %result = urem i64 32768, %x.shr
   ret i64 %result
@@ -972,6 +1623,24 @@ define i64 @v_test_urem24_pow2_k_den_i64(i64 %x) {
 ; GCN-NEXT:    v_bfe_u32 v0, v1, 8, 15
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
+;
+; GCN-IR-LABEL: v_test_urem24_pow2_k_den_i64:
+; GCN-IR:       ; %bb.0:
+; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v1
+; GCN-IR-NEXT:    s_mov_b32 s4, 0x47000000
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, 0x38000000, v1
+; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mad_f32 v1, -v2, s4, v1
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, s4
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; GCN-IR-NEXT:    v_lshlrev_b32_e32 v1, 15, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %x.shr = lshr i64 %x, 40
   %result = urem i64 %x.shr, 32768
   ret i64 %result