[llvm] e5638c5 - [AMDGPU] Use correct number of bits needed for div/rem shrinking (#80622)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Feb 6 08:02:33 PST 2024


Author: choikwa
Date: 2024-02-06T21:32:28+05:30
New Revision: e5638c5a00682243b1ee012d7dd8292aa221dff8

URL: https://github.com/llvm/llvm-project/commit/e5638c5a00682243b1ee012d7dd8292aa221dff8
DIFF: https://github.com/llvm/llvm-project/commit/e5638c5a00682243b1ee012d7dd8292aa221dff8.diff

LOG: [AMDGPU] Use correct number of bits needed for div/rem shrinking (#80622)

There was an error where a dividend of type i64 whose actual number of used
bits was 32 fell into the path that assumes only 24 bits are used. Ensure the
AtLeast argument is computed correctly (from the operand's scalar bit width)
when calling computeNumSignBits, and add the extend/trunc needed for the
32-bit path.
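
For illustration, a reduced case of the kind this fixes, adapted from the
udiv64_known32 test in bypass-div.ll: both operands of the i64 udiv are known
to use only 32 bits, so the divide can be shrunk to 32 bits, but the old
hardcoded AtLeast value of 9 let it match the 24-bit expansion instead.

  define i64 @udiv64_known32(i64 %a, i64 %b) {
    %a.mask = and i64 %a, 4294967295    ; only the low 32 bits are used
    %b.mask = and i64 %b, 4294967295
    %result = udiv i64 %a.mask, %b.mask ; now expanded as a 32-bit divide
    ret i64 %result
  }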

Re-golden and update test cases.

@jrbyrnes @bcahoon @arsenm @rampitec

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
    llvm/test/CodeGen/AMDGPU/bypass-div.ll
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index c4293f59204d0..1c75c5a47c9d2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1213,7 +1213,10 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem24(IRBuilder<> &Builder,
                                                 BinaryOperator &I, Value *Num,
                                                 Value *Den, bool IsDiv,
                                                 bool IsSigned) const {
-  int DivBits = getDivNumBits(I, Num, Den, 9, IsSigned);
+  unsigned SSBits = Num->getType()->getScalarSizeInBits();
+  // If Num bits <= 24, assume 0 signbits.
+  unsigned AtLeast = (SSBits <= 24) ? 0 : (SSBits - 24 + IsSigned);
+  int DivBits = getDivNumBits(I, Num, Den, AtLeast, IsSigned);
   if (DivBits == -1)
     return nullptr;
   return expandDivRem24Impl(Builder, I, Num, Den, DivBits, IsDiv, IsSigned);
@@ -1385,13 +1388,13 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
   Type *I32Ty = Builder.getInt32Ty();
   Type *F32Ty = Builder.getFloatTy();
 
-  if (Ty->getScalarSizeInBits() < 32) {
+  if (Ty->getScalarSizeInBits() != 32) {
     if (IsSigned) {
-      X = Builder.CreateSExt(X, I32Ty);
-      Y = Builder.CreateSExt(Y, I32Ty);
+      X = Builder.CreateSExtOrTrunc(X, I32Ty);
+      Y = Builder.CreateSExtOrTrunc(Y, I32Ty);
     } else {
-      X = Builder.CreateZExt(X, I32Ty);
-      Y = Builder.CreateZExt(Y, I32Ty);
+      X = Builder.CreateZExtOrTrunc(X, I32Ty);
+      Y = Builder.CreateZExtOrTrunc(Y, I32Ty);
     }
   }
 
@@ -1482,10 +1485,10 @@ Value *AMDGPUCodeGenPrepareImpl::expandDivRem32(IRBuilder<> &Builder,
   if (IsSigned) {
     Res = Builder.CreateXor(Res, Sign);
     Res = Builder.CreateSub(Res, Sign);
+    Res = Builder.CreateSExtOrTrunc(Res, Ty);
+  } else {
+    Res = Builder.CreateZExtOrTrunc(Res, Ty);
   }
-
-  Res = Builder.CreateTrunc(Res, Ty);
-
   return Res;
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 3eb6f1eced095..0a6b7af2f78d4 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -3055,19 +3055,29 @@ define i64 @v_sdiv_i64_24bit(i64 %num, i64 %den) {
 ; CGP-LABEL: v_sdiv_i64_24bit:
 ; CGP:       ; %bb.0:
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v2
-; CGP-NEXT:    v_cvt_f32_i32_e32 v1, v1
-; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; CGP-NEXT:    v_cvt_f32_i32_e32 v0, v0
-; CGP-NEXT:    v_rcp_f32_e32 v2, v1
-; CGP-NEXT:    v_mul_f32_e32 v2, v0, v2
-; CGP-NEXT:    v_trunc_f32_e32 v2, v2
-; CGP-NEXT:    v_mad_f32 v0, -v2, v1, v0
-; CGP-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, |v1|
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
-; CGP-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; CGP-NEXT:    v_and_b32_e32 v3, 0xffffff, v2
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v3
+; CGP-NEXT:    v_and_b32_e32 v5, 0xffffff, v0
+; CGP-NEXT:    v_rcp_f32_e32 v1, v1
+; CGP-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; CGP-NEXT:    v_cvt_u32_f32_e32 v4, v1
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, 0, v3
+; CGP-NEXT:    v_mul_lo_u32 v1, v1, v4
+; CGP-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v4, v1, 0
+; CGP-NEXT:    v_mov_b32_e32 v0, v2
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v4, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v5, v0, 0
+; CGP-NEXT:    v_mov_b32_e32 v0, v1
+; CGP-NEXT:    v_mul_lo_u32 v1, v0, v3
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v5, v1
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; CGP-NEXT:    v_sub_i32_e64 v2, s[4:5], v1, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; CGP-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
   %num.mask = and i64 %num, 16777215
@@ -3335,32 +3345,52 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
 ; CGP-LABEL: v_sdiv_v2i64_24bit:
 ; CGP:       ; %bb.0:
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v4
-; CGP-NEXT:    v_cvt_f32_i32_e32 v1, v1
-; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; CGP-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; CGP-NEXT:    v_and_b32_e32 v3, 0xffffff, v4
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v3
 ; CGP-NEXT:    v_and_b32_e32 v4, 0xffffff, v6
-; CGP-NEXT:    v_rcp_f32_e32 v3, v1
-; CGP-NEXT:    v_cvt_f32_i32_e32 v4, v4
+; CGP-NEXT:    v_sub_i32_e32 v6, vcc, 0, v3
+; CGP-NEXT:    v_rcp_f32_e32 v1, v1
+; CGP-NEXT:    v_and_b32_e32 v7, 0xffffff, v0
 ; CGP-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
-; CGP-NEXT:    v_cvt_f32_i32_e32 v2, v2
-; CGP-NEXT:    v_mul_f32_e32 v3, v0, v3
-; CGP-NEXT:    v_trunc_f32_e32 v3, v3
-; CGP-NEXT:    v_mad_f32 v0, -v3, v1, v0
-; CGP-NEXT:    v_cvt_i32_f32_e32 v3, v3
-; CGP-NEXT:    v_rcp_f32_e32 v5, v4
-; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, |v1|
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v3, v0
-; CGP-NEXT:    v_mul_f32_e32 v3, v2, v5
-; CGP-NEXT:    v_trunc_f32_e32 v3, v3
-; CGP-NEXT:    v_mad_f32 v2, -v3, v4, v2
-; CGP-NEXT:    v_cvt_i32_f32_e32 v3, v3
-; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, |v4|
-; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
-; CGP-NEXT:    v_bfe_i32 v0, v0, 0, 25
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CGP-NEXT:    v_bfe_i32 v2, v2, 0, 25
+; CGP-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; CGP-NEXT:    v_cvt_u32_f32_e32 v5, v1
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v4
+; CGP-NEXT:    v_mul_lo_u32 v6, v6, v5
+; CGP-NEXT:    v_rcp_f32_e32 v8, v1
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v5, v6, 0
+; CGP-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v8
+; CGP-NEXT:    v_cvt_u32_f32_e32 v6, v0
+; CGP-NEXT:    v_mov_b32_e32 v0, v1
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v7, v0, 0
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, 0, v4
+; CGP-NEXT:    v_mov_b32_e32 v5, v1
+; CGP-NEXT:    v_mul_lo_u32 v0, v0, v6
+; CGP-NEXT:    v_mul_lo_u32 v1, v5, v3
+; CGP-NEXT:    v_add_i32_e32 v8, vcc, 1, v5
+; CGP-NEXT:    v_sub_i32_e32 v7, vcc, v7, v1
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v6, v0, 0
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v7, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc
+; CGP-NEXT:    v_mov_b32_e32 v0, v1
+; CGP-NEXT:    v_add_i32_e64 v0, s[4:5], v6, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, v0, 0
+; CGP-NEXT:    v_sub_i32_e64 v8, s[4:5], v7, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v7, v8, vcc
+; CGP-NEXT:    v_mov_b32_e32 v7, v1
+; CGP-NEXT:    v_mul_lo_u32 v8, v7, v4
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, 1, v5
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v5, v6, vcc
+; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v8
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, 1, v7
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
+; CGP-NEXT:    v_cndmask_b32_e32 v3, v7, v3, vcc
+; CGP-NEXT:    v_sub_i32_e64 v5, s[4:5], v2, v4
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, 1, v3
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v3, v5, vcc
 ; CGP-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; CGP-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; CGP-NEXT:    s_setpc_b64 s[30:31]

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
index 0b22b3b3a4ba7..c455b24313ddc 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
@@ -3000,21 +3000,27 @@ define i64 @v_srem_i64_24bit(i64 %num, i64 %den) {
 ; CGP-LABEL: v_srem_i64_24bit:
 ; CGP:       ; %bb.0:
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v2
-; CGP-NEXT:    v_cvt_f32_i32_e32 v2, v1
-; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v0
-; CGP-NEXT:    v_rcp_f32_e32 v4, v2
-; CGP-NEXT:    v_mul_f32_e32 v4, v3, v4
-; CGP-NEXT:    v_trunc_f32_e32 v4, v4
-; CGP-NEXT:    v_mad_f32 v3, -v4, v2, v3
-; CGP-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v3|, |v2|
-; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
-; CGP-NEXT:    v_mul_lo_u32 v1, v2, v1
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; CGP-NEXT:    v_and_b32_e32 v3, 0xffffff, v2
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v3
+; CGP-NEXT:    v_and_b32_e32 v5, 0xffffff, v0
+; CGP-NEXT:    v_rcp_f32_e32 v1, v1
+; CGP-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; CGP-NEXT:    v_cvt_u32_f32_e32 v4, v1
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, 0, v3
+; CGP-NEXT:    v_mul_lo_u32 v1, v1, v4
+; CGP-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], v4, v1, 0
+; CGP-NEXT:    v_mov_b32_e32 v0, v2
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v4, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v5, v0, 0
+; CGP-NEXT:    v_mov_b32_e32 v0, v1
+; CGP-NEXT:    v_mul_lo_u32 v0, v0, v3
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v5, v0
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v0, v3
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v0, v3
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; CGP-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
   %num.mask = and i64 %num, 16777215
@@ -3282,37 +3288,47 @@ define <2 x i64> @v_srem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
 ; CGP-LABEL: v_srem_v2i64_24bit:
 ; CGP:       ; %bb.0:
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v4
-; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v1
-; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; CGP-NEXT:    v_cvt_f32_i32_e32 v4, v0
-; CGP-NEXT:    v_and_b32_e32 v6, 0xffffff, v6
-; CGP-NEXT:    v_rcp_f32_e32 v5, v3
+; CGP-NEXT:    v_and_b32_e32 v3, 0xffffff, v4
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v3
+; CGP-NEXT:    v_and_b32_e32 v4, 0xffffff, v6
+; CGP-NEXT:    v_sub_i32_e32 v6, vcc, 0, v3
+; CGP-NEXT:    v_rcp_f32_e32 v1, v1
+; CGP-NEXT:    v_and_b32_e32 v7, 0xffffff, v0
 ; CGP-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
-; CGP-NEXT:    v_mul_f32_e32 v5, v4, v5
-; CGP-NEXT:    v_trunc_f32_e32 v5, v5
-; CGP-NEXT:    v_mad_f32 v4, -v5, v3, v4
-; CGP-NEXT:    v_cvt_i32_f32_e32 v5, v5
-; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v4|, |v3|
-; CGP-NEXT:    v_cvt_f32_i32_e32 v4, v6
-; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; CGP-NEXT:    v_mul_lo_u32 v1, v3, v1
-; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v2
-; CGP-NEXT:    v_rcp_f32_e32 v5, v4
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT:    v_mul_f32_e32 v1, v3, v5
-; CGP-NEXT:    v_trunc_f32_e32 v1, v1
-; CGP-NEXT:    v_mad_f32 v3, -v1, v4, v3
-; CGP-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v3|, |v4|
-; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
-; CGP-NEXT:    v_bfe_i32 v0, v0, 0, 25
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; CGP-NEXT:    v_mul_lo_u32 v3, v1, v6
+; CGP-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; CGP-NEXT:    v_cvt_u32_f32_e32 v5, v1
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v4
+; CGP-NEXT:    v_mul_lo_u32 v6, v6, v5
+; CGP-NEXT:    v_rcp_f32_e32 v8, v1
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v5, v6, 0
+; CGP-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v8
+; CGP-NEXT:    v_cvt_u32_f32_e32 v6, v0
+; CGP-NEXT:    v_mov_b32_e32 v0, v1
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v7, v0, 0
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, 0, v4
+; CGP-NEXT:    v_mul_lo_u32 v0, v0, v6
+; CGP-NEXT:    v_mul_lo_u32 v5, v1, v3
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v6, v0, 0
+; CGP-NEXT:    v_sub_i32_e32 v5, vcc, v7, v5
+; CGP-NEXT:    v_mov_b32_e32 v0, v1
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v6, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v2, v0, 0
+; CGP-NEXT:    v_sub_i32_e32 v7, vcc, v5, v3
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v3
+; CGP-NEXT:    v_mul_lo_u32 v6, v1, v4
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v5, v7, vcc
+; CGP-NEXT:    v_sub_i32_e32 v5, vcc, v0, v3
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v5, vcc
+; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v6
+; CGP-NEXT:    v_sub_i32_e32 v3, vcc, v2, v4
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
+; CGP-NEXT:    v_sub_i32_e32 v3, vcc, v2, v4
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
 ; CGP-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v3
-; CGP-NEXT:    v_bfe_i32 v2, v2, 0, 25
 ; CGP-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
   %num.mask = and <2 x i64> %num, <i64 16777215, i64 16777215>

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
index 6588112973f4c..cd01148fa7dd7 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i32.ll
@@ -415,25 +415,17 @@ define i32 @v_udiv_i32_24bit(i32 %num, i32 %den) {
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
-; CGP-NEXT:    v_cvt_f32_u32_e32 v2, v1
-; CGP-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
-; CGP-NEXT:    v_rcp_f32_e32 v2, v2
-; CGP-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; CGP-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; CGP-NEXT:    v_rcp_f32_e32 v2, v1
+; CGP-NEXT:    v_mul_f32_e32 v2, v0, v2
+; CGP-NEXT:    v_trunc_f32_e32 v2, v2
+; CGP-NEXT:    v_fma_f32 v0, -v2, v1, v0
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; CGP-NEXT:    v_mul_lo_u32 v3, v3, v2
-; CGP-NEXT:    v_mul_hi_u32 v3, v2, v3
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; CGP-NEXT:    v_mul_hi_u32 v2, v0, v2
-; CGP-NEXT:    v_mul_lo_u32 v3, v2, v1
-; CGP-NEXT:    v_add_i32_e32 v4, vcc, 1, v2
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v3
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
-; CGP-NEXT:    v_sub_i32_e64 v3, s[4:5], v0, v1
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, 1, v2
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
+; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
   %num.mask = and i32 %num, 16777215
   %den.mask = and i32 %den, 16777215
@@ -496,44 +488,28 @@ define <2 x i32> @v_udiv_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
 ; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
 ; CGP-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
 ; CGP-NEXT:    v_and_b32_e32 v3, 0xffffff, v3
-; CGP-NEXT:    v_cvt_f32_u32_e32 v4, v2
-; CGP-NEXT:    v_sub_i32_e32 v5, vcc, 0, v2
-; CGP-NEXT:    v_cvt_f32_u32_e32 v6, v3
-; CGP-NEXT:    v_sub_i32_e32 v7, vcc, 0, v3
-; CGP-NEXT:    v_rcp_f32_e32 v4, v4
-; CGP-NEXT:    v_rcp_f32_e32 v6, v6
-; CGP-NEXT:    v_mul_f32_e32 v4, 0x4f7ffffe, v4
-; CGP-NEXT:    v_mul_f32_e32 v6, 0x4f7ffffe, v6
+; CGP-NEXT:    v_cvt_f32_u32_e32 v0, v0
+; CGP-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; CGP-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; CGP-NEXT:    v_cvt_f32_u32_e32 v3, v3
+; CGP-NEXT:    v_rcp_f32_e32 v4, v2
+; CGP-NEXT:    v_rcp_f32_e32 v5, v3
+; CGP-NEXT:    v_mul_f32_e32 v4, v0, v4
+; CGP-NEXT:    v_mul_f32_e32 v5, v1, v5
+; CGP-NEXT:    v_trunc_f32_e32 v4, v4
+; CGP-NEXT:    v_trunc_f32_e32 v5, v5
+; CGP-NEXT:    v_fma_f32 v0, -v4, v2, v0
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; CGP-NEXT:    v_cvt_u32_f32_e32 v6, v6
-; CGP-NEXT:    v_mul_lo_u32 v5, v5, v4
-; CGP-NEXT:    v_mul_lo_u32 v7, v7, v6
-; CGP-NEXT:    v_mul_hi_u32 v5, v4, v5
-; CGP-NEXT:    v_mul_hi_u32 v7, v6, v7
-; CGP-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v7
-; CGP-NEXT:    v_mul_hi_u32 v4, v0, v4
-; CGP-NEXT:    v_mul_hi_u32 v5, v1, v5
-; CGP-NEXT:    v_mul_lo_u32 v6, v4, v2
-; CGP-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
-; CGP-NEXT:    v_mul_lo_u32 v8, v5, v3
-; CGP-NEXT:    v_add_i32_e32 v9, vcc, 1, v5
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
-; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v1, v8
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
-; CGP-NEXT:    v_cndmask_b32_e32 v4, v4, v7, vcc
-; CGP-NEXT:    v_sub_i32_e64 v6, s[4:5], v0, v2
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v1, v3
-; CGP-NEXT:    v_cndmask_b32_e64 v5, v5, v9, s[4:5]
-; CGP-NEXT:    v_sub_i32_e64 v7, s[6:7], v1, v3
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v6, vcc
-; CGP-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
-; CGP-NEXT:    v_cndmask_b32_e64 v1, v1, v7, s[4:5]
-; CGP-NEXT:    v_add_i32_e32 v7, vcc, 1, v5
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v4, v6, vcc
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
-; CGP-NEXT:    v_cndmask_b32_e32 v1, v5, v7, vcc
+; CGP-NEXT:    v_fma_f32 v1, -v5, v3, v1
+; CGP-NEXT:    v_cvt_u32_f32_e32 v5, v5
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, v2
+; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, v3
+; CGP-NEXT:    v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v4, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
+; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
   %num.mask = and <2 x i32> %num, <i32 16777215, i32 16777215>
   %den.mask = and <2 x i32> %den, <i32 16777215, i32 16777215>

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
index 158403644607a..31f61b9968b8b 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i32.ll
@@ -445,23 +445,19 @@ define i32 @v_urem_i32_24bit(i32 %num, i32 %den) {
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
-; CGP-NEXT:    v_cvt_f32_u32_e32 v2, v1
-; CGP-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
-; CGP-NEXT:    v_rcp_f32_e32 v2, v2
-; CGP-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; CGP-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; CGP-NEXT:    v_mul_lo_u32 v3, v3, v2
-; CGP-NEXT:    v_mul_hi_u32 v3, v2, v3
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; CGP-NEXT:    v_mul_hi_u32 v2, v0, v2
-; CGP-NEXT:    v_mul_lo_u32 v2, v2, v1
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v0, v1
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; CGP-NEXT:    v_cvt_f32_u32_e32 v2, v0
+; CGP-NEXT:    v_cvt_f32_u32_e32 v3, v1
+; CGP-NEXT:    v_rcp_f32_e32 v4, v3
+; CGP-NEXT:    v_mul_f32_e32 v4, v2, v4
+; CGP-NEXT:    v_trunc_f32_e32 v4, v4
+; CGP-NEXT:    v_fma_f32 v2, -v4, v3, v2
+; CGP-NEXT:    v_cvt_u32_f32_e32 v4, v4
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, v3
+; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; CGP-NEXT:    v_mul_lo_u32 v1, v2, v1
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
+; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
   %num.mask = and i32 %num, 16777215
   %den.mask = and i32 %den, 16777215
@@ -520,40 +516,32 @@ define <2 x i32> @v_urem_v2i32_24bit(<2 x i32> %num, <2 x i32> %den) {
 ; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
 ; CGP-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
 ; CGP-NEXT:    v_and_b32_e32 v3, 0xffffff, v3
-; CGP-NEXT:    v_cvt_f32_u32_e32 v4, v2
-; CGP-NEXT:    v_sub_i32_e32 v5, vcc, 0, v2
-; CGP-NEXT:    v_cvt_f32_u32_e32 v6, v3
-; CGP-NEXT:    v_sub_i32_e32 v7, vcc, 0, v3
-; CGP-NEXT:    v_rcp_f32_e32 v4, v4
-; CGP-NEXT:    v_rcp_f32_e32 v6, v6
-; CGP-NEXT:    v_mul_f32_e32 v4, 0x4f7ffffe, v4
-; CGP-NEXT:    v_mul_f32_e32 v6, 0x4f7ffffe, v6
-; CGP-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; CGP-NEXT:    v_cvt_u32_f32_e32 v6, v6
-; CGP-NEXT:    v_mul_lo_u32 v5, v5, v4
-; CGP-NEXT:    v_mul_lo_u32 v7, v7, v6
-; CGP-NEXT:    v_mul_hi_u32 v5, v4, v5
-; CGP-NEXT:    v_mul_hi_u32 v7, v6, v7
-; CGP-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v7
-; CGP-NEXT:    v_mul_hi_u32 v4, v0, v4
-; CGP-NEXT:    v_mul_hi_u32 v5, v1, v5
-; CGP-NEXT:    v_mul_lo_u32 v4, v4, v2
-; CGP-NEXT:    v_mul_lo_u32 v5, v5, v3
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v1, v5
-; CGP-NEXT:    v_sub_i32_e32 v4, vcc, v0, v2
-; CGP-NEXT:    v_sub_i32_e32 v5, vcc, v1, v3
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
-; CGP-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; CGP-NEXT:    v_sub_i32_e32 v4, vcc, v0, v2
-; CGP-NEXT:    v_sub_i32_e32 v5, vcc, v1, v3
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
-; CGP-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
-; CGP-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
+; CGP-NEXT:    v_cvt_f32_u32_e32 v4, v0
+; CGP-NEXT:    v_cvt_f32_u32_e32 v5, v2
+; CGP-NEXT:    v_cvt_f32_u32_e32 v6, v1
+; CGP-NEXT:    v_cvt_f32_u32_e32 v7, v3
+; CGP-NEXT:    v_rcp_f32_e32 v8, v5
+; CGP-NEXT:    v_rcp_f32_e32 v9, v7
+; CGP-NEXT:    v_mul_f32_e32 v8, v4, v8
+; CGP-NEXT:    v_mul_f32_e32 v9, v6, v9
+; CGP-NEXT:    v_trunc_f32_e32 v8, v8
+; CGP-NEXT:    v_trunc_f32_e32 v9, v9
+; CGP-NEXT:    v_fma_f32 v4, -v8, v5, v4
+; CGP-NEXT:    v_cvt_u32_f32_e32 v8, v8
+; CGP-NEXT:    v_fma_f32 v6, -v9, v7, v6
+; CGP-NEXT:    v_cvt_u32_f32_e32 v9, v9
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v4|, v5
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, s[4:5]
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v6|, v7
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v8, v4
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
+; CGP-NEXT:    v_mul_lo_u32 v2, v4, v2
+; CGP-NEXT:    v_mul_lo_u32 v3, v5, v3
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v1, v3
+; CGP-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
+; CGP-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
   %num.mask = and <2 x i32> %num, <i32 16777215, i32 16777215>
   %den.mask = and <2 x i32> %den, <i32 16777215, i32 16777215>

diff --git a/llvm/test/CodeGen/AMDGPU/bypass-div.ll b/llvm/test/CodeGen/AMDGPU/bypass-div.ll
index cb1b664549c9a..4d8687b141a79 100644
--- a/llvm/test/CodeGen/AMDGPU/bypass-div.ll
+++ b/llvm/test/CodeGen/AMDGPU/bypass-div.ll
@@ -1024,16 +1024,25 @@ define i64 @sdiv64_known32(i64 %a, i64 %b) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v3
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v1
-; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GFX9-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GFX9-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; GFX9-NEXT:    v_sub_u32_e32 v2, 0, v3
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX9-NEXT:    v_mul_lo_u32 v2, v2, v0
+; GFX9-NEXT:    v_mul_hi_u32 v2, v0, v2
+; GFX9-NEXT:    v_add_u32_e32 v0, v0, v2
+; GFX9-NEXT:    v_mul_hi_u32 v0, v1, v0
+; GFX9-NEXT:    v_mul_lo_u32 v2, v0, v3
+; GFX9-NEXT:    v_add_u32_e32 v4, 1, v0
+; GFX9-NEXT:    v_sub_u32_e32 v1, v1, v2
+; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
+; GFX9-NEXT:    v_sub_u32_e32 v2, v1, v3
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
+; GFX9-NEXT:    v_add_u32_e32 v2, 1, v0
+; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v3
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
-; GFX9-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   %a.ext = ashr i64 %a, 32
   %b.ext = ashr i64 %b, 32
@@ -1046,15 +1055,25 @@ define i64 @udiv64_known32(i64 %a, i64 %b) {
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, v2
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, v0
-; GFX9-NEXT:    v_rcp_iflag_f32_e32 v2, v1
-; GFX9-NEXT:    v_mul_f32_e32 v2, v0, v2
-; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GFX9-NEXT:    v_mad_f32 v0, -v2, v1, v0
-; GFX9-NEXT:    v_cmp_ge_f32_e64 vcc, |v0|, v1
+; GFX9-NEXT:    v_sub_u32_e32 v3, 0, v2
+; GFX9-NEXT:    v_rcp_iflag_f32_e32 v1, v1
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX9-NEXT:    v_mul_lo_u32 v3, v3, v1
+; GFX9-NEXT:    v_mul_hi_u32 v3, v1, v3
+; GFX9-NEXT:    v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GFX9-NEXT:    v_mul_lo_u32 v3, v1, v2
+; GFX9-NEXT:    v_add_u32_e32 v4, 1, v1
+; GFX9-NEXT:    v_sub_u32_e32 v0, v0, v3
+; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
+; GFX9-NEXT:    v_sub_u32_e32 v3, v0, v2
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; GFX9-NEXT:    v_add_u32_e32 v3, 1, v1
+; GFX9-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
+; GFX9-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v1, 0
-; GFX9-NEXT:    v_addc_co_u32_e32 v0, vcc, 0, v3, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
   %a.mask = and i64 %a, 4294967295
   %b.mask = and i64 %b, 4294967295

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index f37d681ab965e..b086640c72f80 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -529,17 +529,26 @@ define i64 @v_test_sdiv24_64(i64 %x, i64 %y) {
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-NEXT:    v_lshrrev_b32_e32 v0, 8, v3
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, v0
+; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v0
 ; GCN-NEXT:    v_lshrrev_b32_e32 v1, 8, v1
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, v1
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_cvt_i32_f32_e32 v3, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_lo_u32 v3, v3, v2
+; GCN-NEXT:    v_mul_hi_u32 v3, v2, v3
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; GCN-NEXT:    v_mul_hi_u32 v2, v1, v2
+; GCN-NEXT:    v_mul_u32_u24_e32 v3, v2, v0
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, 1, v2
+; GCN-NEXT:    v_sub_i32_e32 v1, vcc, v1, v3
+; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-NEXT:    v_sub_i32_e64 v3, s[4:5], v1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GCN-NEXT:    v_add_i32_e32 v3, vcc, 1, v2
+; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
 ; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -547,17 +556,26 @@ define i64 @v_test_sdiv24_64(i64 %x, i64 %y) {
 ; GCN-IR:       ; %bb.0:
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 8, v3
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v3, vcc, 0, v0
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v1, 8, v1
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, v1
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v3, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v0|
-; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v2
+; GCN-IR-NEXT:    v_mul_hi_u32 v3, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, v1, v2
+; GCN-IR-NEXT:    v_mul_u32_u24_e32 v3, v2, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v1, vcc, v1, v3
+; GCN-IR-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v3, s[4:5], v1, v0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v2
+; GCN-IR-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %1 = lshr i64 %x, 40
@@ -569,55 +587,91 @@ define i64 @v_test_sdiv24_64(i64 %x, i64 %y) {
 define amdgpu_kernel void @s_test_sdiv32_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_sdiv32_64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
-; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s3
-; GCN-NEXT:    s_mov_b32 s4, s0
-; GCN-NEXT:    s_xor_b32 s0, s3, s8
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-NEXT:    s_ashr_i32 s8, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s8
+; GCN-NEXT:    s_xor_b32 s9, s2, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s2, 0, s9
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    s_or_b32 s2, s0, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
-; GCN-NEXT:    s_and_b64 s[0:1], s[0:1], exec
-; GCN-NEXT:    s_cselect_b32 s0, s2, 0
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_ashr_i32 s2, s3, 31
+; GCN-NEXT:    s_add_i32 s3, s3, s2
+; GCN-NEXT:    s_xor_b32 s3, s3, s2
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    s_xor_b32 s0, s2, s8
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_mul_i32 s2, s1, s9
+; GCN-NEXT:    s_sub_i32 s2, s3, s2
+; GCN-NEXT:    s_add_i32 s8, s1, 1
+; GCN-NEXT:    s_sub_i32 s3, s2, s9
+; GCN-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-NEXT:    s_cselect_b32 s1, s8, s1
+; GCN-NEXT:    s_cselect_b32 s2, s3, s2
+; GCN-NEXT:    s_add_i32 s3, s1, 1
+; GCN-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-NEXT:    s_cselect_b32 s1, s3, s1
+; GCN-NEXT:    s_xor_b32 s1, s1, s0
+; GCN-NEXT:    s_sub_i32 s0, s1, s0
+; GCN-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_sdiv32_64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
-; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s3
-; GCN-IR-NEXT:    s_mov_b32 s4, s0
-; GCN-IR-NEXT:    s_xor_b32 s0, s3, s8
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_ashr_i32 s0, s0, 30
+; GCN-IR-NEXT:    s_ashr_i32 s8, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s8
+; GCN-IR-NEXT:    s_xor_b32 s9, s2, s8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s9
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-IR-NEXT:    s_mov_b32 s5, s1
-; GCN-IR-NEXT:    s_or_b32 s2, s0, 1
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[0:1], |v1|, |v0|
-; GCN-IR-NEXT:    s_and_b64 s[0:1], s[0:1], exec
-; GCN-IR-NEXT:    s_cselect_b32 s0, s2, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s0, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
+; GCN-IR-NEXT:    s_add_i32 s3, s3, s2
+; GCN-IR-NEXT:    s_xor_b32 s3, s3, s2
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    s_xor_b32 s0, s2, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-IR-NEXT:    s_mul_i32 s2, s1, s9
+; GCN-IR-NEXT:    s_sub_i32 s2, s3, s2
+; GCN-IR-NEXT:    s_add_i32 s8, s1, 1
+; GCN-IR-NEXT:    s_sub_i32 s3, s2, s9
+; GCN-IR-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-IR-NEXT:    s_cselect_b32 s1, s8, s1
+; GCN-IR-NEXT:    s_cselect_b32 s2, s3, s2
+; GCN-IR-NEXT:    s_add_i32 s3, s1, 1
+; GCN-IR-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-IR-NEXT:    s_cselect_b32 s1, s3, s1
+; GCN-IR-NEXT:    s_xor_b32 s1, s1, s0
+; GCN-IR-NEXT:    s_sub_i32 s0, s1, s0
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 32
@@ -630,62 +684,96 @@ define amdgpu_kernel void @s_test_sdiv32_64(ptr addrspace(1) %out, i64 %x, i64 %
 define amdgpu_kernel void @s_test_sdiv31_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_sdiv31_64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s0, s4
-; GCN-NEXT:    s_ashr_i64 s[8:9], s[0:1], 33
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-NEXT:    s_mov_b32 s1, s5
-; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_xor_b32 s4, s4, s8
-; GCN-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-NEXT:    s_or_b32 s6, s4, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GCN-NEXT:    s_cselect_b32 s4, s6, 0
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
-; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 31
-; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-NEXT:    s_ashr_i32 s8, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s8
+; GCN-NEXT:    s_xor_b32 s9, s2, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s2, 0, s9
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    s_mov_b32 s5, s1
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-NEXT:    s_xor_b32 s0, s3, s8
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_mul_i32 s3, s1, s9
+; GCN-NEXT:    s_sub_i32 s2, s2, s3
+; GCN-NEXT:    s_add_i32 s8, s1, 1
+; GCN-NEXT:    s_sub_i32 s3, s2, s9
+; GCN-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-NEXT:    s_cselect_b32 s1, s8, s1
+; GCN-NEXT:    s_cselect_b32 s2, s3, s2
+; GCN-NEXT:    s_add_i32 s3, s1, 1
+; GCN-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-NEXT:    s_cselect_b32 s1, s3, s1
+; GCN-NEXT:    s_xor_b32 s1, s1, s0
+; GCN-NEXT:    s_sub_i32 s0, s1, s0
+; GCN-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_sdiv31_64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b32 s0, s4
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[0:1], 33
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
-; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_xor_b32 s4, s4, s8
-; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
-; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
-; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 31
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-IR-NEXT:    s_ashr_i32 s8, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s8
+; GCN-IR-NEXT:    s_xor_b32 s9, s2, s8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s9
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-IR-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s3
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-IR-NEXT:    s_xor_b32 s0, s3, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-IR-NEXT:    s_mul_i32 s3, s1, s9
+; GCN-IR-NEXT:    s_sub_i32 s2, s2, s3
+; GCN-IR-NEXT:    s_add_i32 s8, s1, 1
+; GCN-IR-NEXT:    s_sub_i32 s3, s2, s9
+; GCN-IR-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-IR-NEXT:    s_cselect_b32 s1, s8, s1
+; GCN-IR-NEXT:    s_cselect_b32 s2, s3, s2
+; GCN-IR-NEXT:    s_add_i32 s3, s1, 1
+; GCN-IR-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-IR-NEXT:    s_cselect_b32 s1, s3, s1
+; GCN-IR-NEXT:    s_xor_b32 s1, s1, s0
+; GCN-IR-NEXT:    s_sub_i32 s0, s1, s0
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 33
   %2 = ashr i64 %y, 33
@@ -764,62 +852,96 @@ define amdgpu_kernel void @s_test_sdiv23_64(ptr addrspace(1) %out, i64 %x, i64 %
 define amdgpu_kernel void @s_test_sdiv25_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_sdiv25_64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s0, s4
-; GCN-NEXT:    s_ashr_i64 s[8:9], s[0:1], 39
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-NEXT:    s_mov_b32 s1, s5
-; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_xor_b32 s4, s4, s8
-; GCN-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-NEXT:    s_or_b32 s6, s4, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GCN-NEXT:    s_cselect_b32 s4, s6, 0
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
-; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 25
-; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-NEXT:    s_ashr_i32 s8, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s8
+; GCN-NEXT:    s_xor_b32 s9, s2, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s2, 0, s9
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    s_mov_b32 s5, s1
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-NEXT:    s_xor_b32 s0, s3, s8
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_mul_i32 s3, s1, s9
+; GCN-NEXT:    s_sub_i32 s2, s2, s3
+; GCN-NEXT:    s_add_i32 s8, s1, 1
+; GCN-NEXT:    s_sub_i32 s3, s2, s9
+; GCN-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-NEXT:    s_cselect_b32 s1, s8, s1
+; GCN-NEXT:    s_cselect_b32 s2, s3, s2
+; GCN-NEXT:    s_add_i32 s3, s1, 1
+; GCN-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-NEXT:    s_cselect_b32 s1, s3, s1
+; GCN-NEXT:    s_xor_b32 s1, s1, s0
+; GCN-NEXT:    s_sub_i32 s0, s1, s0
+; GCN-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_sdiv25_64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b32 s0, s4
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[0:1], 39
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
-; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_xor_b32 s4, s4, s8
-; GCN-IR-NEXT:    s_ashr_i32 s4, s4, 30
-; GCN-IR-NEXT:    s_or_b32 s6, s4, 1
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
-; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GCN-IR-NEXT:    s_cselect_b32 s4, s6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s4, v2
-; GCN-IR-NEXT:    v_bfe_i32 v0, v0, 0, 25
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-IR-NEXT:    s_ashr_i32 s8, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s8
+; GCN-IR-NEXT:    s_xor_b32 s9, s2, s8
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s9
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s9
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-IR-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s3
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-IR-NEXT:    s_xor_b32 s0, s3, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-IR-NEXT:    s_mul_i32 s3, s1, s9
+; GCN-IR-NEXT:    s_sub_i32 s2, s2, s3
+; GCN-IR-NEXT:    s_add_i32 s8, s1, 1
+; GCN-IR-NEXT:    s_sub_i32 s3, s2, s9
+; GCN-IR-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-IR-NEXT:    s_cselect_b32 s1, s8, s1
+; GCN-IR-NEXT:    s_cselect_b32 s2, s3, s2
+; GCN-IR-NEXT:    s_add_i32 s3, s1, 1
+; GCN-IR-NEXT:    s_cmp_ge_u32 s2, s9
+; GCN-IR-NEXT:    s_cselect_b32 s1, s3, s1
+; GCN-IR-NEXT:    s_xor_b32 s1, s1, s0
+; GCN-IR-NEXT:    s_sub_i32 s0, s1, s0
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 39
   %2 = ashr i64 %y, 39

diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index e10be23a71770..ed7f27b367fda 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -650,72 +650,90 @@ define i64 @v_test_srem24_64(i64 %x, i64 %y) {
 define amdgpu_kernel void @s_test_srem25_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_srem25_64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s0, s4
-; GCN-NEXT:    s_ashr_i64 s[8:9], s[0:1], 39
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-NEXT:    s_mov_b32 s1, s5
-; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_xor_b32 s5, s4, s8
-; GCN-NEXT:    s_ashr_i32 s5, s5, 30
-; GCN-NEXT:    s_or_b32 s5, s5, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, |v0|
-; GCN-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; GCN-NEXT:    s_cselect_b32 s5, s5, 0
-; GCN-NEXT:    v_readfirstlane_b32 s6, v2
-; GCN-NEXT:    s_add_i32 s5, s6, s5
-; GCN-NEXT:    s_mul_i32 s5, s5, s8
-; GCN-NEXT:    s_sub_i32 s4, s4, s5
-; GCN-NEXT:    s_bfe_i32 s4, s4, 0x190000
-; GCN-NEXT:    s_ashr_i32 s5, s4, 31
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    s_xor_b32 s8, s2, s3
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    s_mov_b32 s5, s1
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    s_xor_b32 s0, s0, s3
+; GCN-NEXT:    s_sub_i32 s0, s0, s3
+; GCN-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem25_64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b32 s0, s4
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[0:1], 39
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
-; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 39
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_xor_b32 s5, s4, s8
-; GCN-IR-NEXT:    s_ashr_i32 s5, s5, 30
-; GCN-IR-NEXT:    s_or_b32 s5, s5, 1
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, |v0|
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; GCN-IR-NEXT:    s_cselect_b32 s5, s5, 0
-; GCN-IR-NEXT:    v_readfirstlane_b32 s6, v2
-; GCN-IR-NEXT:    s_add_i32 s5, s6, s5
-; GCN-IR-NEXT:    s_mul_i32 s5, s5, s8
-; GCN-IR-NEXT:    s_sub_i32 s4, s4, s5
-; GCN-IR-NEXT:    s_bfe_i32 s4, s4, 0x190000
-; GCN-IR-NEXT:    s_ashr_i32 s5, s4, 31
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-IR-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s3
+; GCN-IR-NEXT:    s_xor_b32 s8, s2, s3
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 39
+; GCN-IR-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s3
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-IR-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    s_xor_b32 s0, s0, s3
+; GCN-IR-NEXT:    s_sub_i32 s0, s0, s3
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 39
   %2 = ashr i64 %y, 39
@@ -727,72 +745,90 @@ define amdgpu_kernel void @s_test_srem25_64(ptr addrspace(1) %out, i64 %x, i64 %
 define amdgpu_kernel void @s_test_srem31_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_srem31_64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s0, s4
-; GCN-NEXT:    s_ashr_i64 s[8:9], s[0:1], 33
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-NEXT:    s_mov_b32 s1, s5
-; GCN-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_xor_b32 s5, s4, s8
-; GCN-NEXT:    s_ashr_i32 s5, s5, 30
-; GCN-NEXT:    s_or_b32 s5, s5, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, |v0|
-; GCN-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; GCN-NEXT:    s_cselect_b32 s5, s5, 0
-; GCN-NEXT:    v_readfirstlane_b32 s6, v2
-; GCN-NEXT:    s_add_i32 s5, s6, s5
-; GCN-NEXT:    s_mul_i32 s5, s5, s8
-; GCN-NEXT:    s_sub_i32 s4, s4, s5
-; GCN-NEXT:    s_bfe_i32 s4, s4, 0x1f0000
-; GCN-NEXT:    s_ashr_i32 s5, s4, 31
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    s_xor_b32 s8, s2, s3
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    s_mov_b32 s5, s1
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    s_xor_b32 s0, s0, s3
+; GCN-NEXT:    s_sub_i32 s0, s0, s3
+; GCN-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem31_64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_load_dword s1, s[0:1], 0xe
-; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
+; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b32 s0, s4
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[0:1], 33
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
-; GCN-IR-NEXT:    s_ashr_i64 s[4:5], s[6:7], 33
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s4
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_xor_b32 s5, s4, s8
-; GCN-IR-NEXT:    s_ashr_i32 s5, s5, 30
-; GCN-IR-NEXT:    s_or_b32 s5, s5, 1
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[6:7], |v1|, |v0|
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[6:7], exec
-; GCN-IR-NEXT:    s_cselect_b32 s5, s5, 0
-; GCN-IR-NEXT:    v_readfirstlane_b32 s6, v2
-; GCN-IR-NEXT:    s_add_i32 s5, s6, s5
-; GCN-IR-NEXT:    s_mul_i32 s5, s5, s8
-; GCN-IR-NEXT:    s_sub_i32 s4, s4, s5
-; GCN-IR-NEXT:    s_bfe_i32 s4, s4, 0x1f0000
-; GCN-IR-NEXT:    s_ashr_i32 s5, s4, 31
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-IR-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s3
+; GCN-IR-NEXT:    s_xor_b32 s8, s2, s3
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_ashr_i64 s[2:3], s[2:3], 33
+; GCN-IR-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s3
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    s_xor_b32 s2, s2, s3
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-IR-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    s_xor_b32 s0, s0, s3
+; GCN-IR-NEXT:    s_sub_i32 s0, s0, s3
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 33
   %2 = ashr i64 %y, 33
@@ -805,59 +841,85 @@ define amdgpu_kernel void @s_test_srem31_64(ptr addrspace(1) %out, i64 %x, i64 %
 define amdgpu_kernel void @s_test_srem32_64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_srem32_64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
-; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-NEXT:    v_cvt_f32_i32_e32 v1, s3
-; GCN-NEXT:    s_xor_b32 s2, s3, s8
-; GCN-NEXT:    s_ashr_i32 s2, s2, 30
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    s_or_b32 s2, s2, 1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
-; GCN-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GCN-NEXT:    s_cselect_b32 s2, s2, 0
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s8
+; GCN-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-NEXT:    s_add_i32 s2, s2, s3
+; GCN-NEXT:    s_xor_b32 s8, s2, s3
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s3, v0
-; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_ashr_i32 s2, s3, 31
+; GCN-NEXT:    s_add_i32 s3, s3, s2
+; GCN-NEXT:    s_xor_b32 s3, s3, s2
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-NEXT:    s_sub_i32 s0, s3, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    s_xor_b32 s0, s0, s2
+; GCN-NEXT:    s_sub_i32 s0, s0, s2
+; GCN-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem32_64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
-; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v0, s8
-; GCN-IR-NEXT:    v_cvt_f32_i32_e32 v1, s3
-; GCN-IR-NEXT:    s_xor_b32 s2, s3, s8
-; GCN-IR-NEXT:    s_ashr_i32 s2, s2, 30
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    s_or_b32 s2, s2, 1
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cvt_i32_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v1|, |v0|
-; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GCN-IR-NEXT:    s_cselect_b32 s2, s2, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s2, v2
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s8
+; GCN-IR-NEXT:    s_ashr_i32 s3, s2, 31
+; GCN-IR-NEXT:    s_add_i32 s2, s2, s3
+; GCN-IR-NEXT:    s_xor_b32 s8, s2, s3
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-IR-NEXT:    s_mov_b32 s4, s0
 ; GCN-IR-NEXT:    s_mov_b32 s5, s1
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s3, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
+; GCN-IR-NEXT:    s_add_i32 s3, s3, s2
+; GCN-IR-NEXT:    s_xor_b32 s3, s3, s2
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-IR-NEXT:    s_sub_i32 s0, s3, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    s_xor_b32 s0, s0, s2
+; GCN-IR-NEXT:    s_sub_i32 s0, s0, s2
+; GCN-IR-NEXT:    s_ashr_i32 s1, s0, 31
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 32
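
A minimal IR sketch of the shape the s_test_srem32_64 checks above cover; the kernel name and body are illustrative, not the exact test source. Both operands keep only 32 significant bits after the ashr, so the srem is expanded with the 32-bit sequence seen in the regenerated GCN/GCN-IR output and the result is sign-extended back to i64.

    ; Hypothetical reduced kernel; assumes the same operand shape as the test above.
    define amdgpu_kernel void @srem32_sketch(ptr addrspace(1) %out, i64 %x, i64 %y) {
      %a = ashr i64 %x, 32                  ; only 32 significant (sign) bits remain
      %b = ashr i64 %y, 32
      %r = srem i64 %a, %b                  ; shrunk to a 32-bit srem, widened back for the store
      store i64 %r, ptr addrspace(1) %out
      ret void
    }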

diff  --git a/llvm/test/CodeGen/AMDGPU/udiv.ll b/llvm/test/CodeGen/AMDGPU/udiv.ll
index fbfaf3cff0806..f686aad0cefc2 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv.ll
@@ -1848,96 +1848,76 @@ define amdgpu_kernel void @v_udiv_i23(ptr addrspace(1) %out, ptr addrspace(1) %i
 define amdgpu_kernel void @v_udiv_i24(ptr addrspace(1) %out, ptr addrspace(1) %in) {
 ; SI-LABEL: v_udiv_i24:
 ; SI:       ; %bb.0:
-; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; SI-NEXT:    s_mov_b32 s3, 0xf000
-; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_mov_b32 s10, s2
-; SI-NEXT:    s_mov_b32 s11, s3
+; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_mov_b32 s10, s6
+; SI-NEXT:    s_mov_b32 s11, s7
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_mov_b32 s8, s6
-; SI-NEXT:    s_mov_b32 s9, s7
+; SI-NEXT:    s_mov_b32 s8, s2
+; SI-NEXT:    s_mov_b32 s9, s3
 ; SI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0 offset:6
 ; SI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0 offset:4
 ; SI-NEXT:    buffer_load_ubyte v2, off, s[8:11], 0 offset:2
 ; SI-NEXT:    buffer_load_ushort v3, off, s[8:11], 0
-; SI-NEXT:    s_mov_b32 s0, s4
-; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s4, s0
+; SI-NEXT:    s_mov_b32 s5, s1
 ; SI-NEXT:    s_waitcnt vmcnt(3)
 ; SI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; SI-NEXT:    s_waitcnt vmcnt(2)
 ; SI-NEXT:    v_or_b32_e32 v0, v1, v0
-; SI-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; SI-NEXT:    v_sub_i32_e32 v4, vcc, 0, v0
+; SI-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; SI-NEXT:    s_waitcnt vmcnt(1)
-; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT:    v_rcp_iflag_f32_e32 v1, v1
+; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_or_b32_e32 v2, v3, v2
-; SI-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; SI-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; SI-NEXT:    v_mul_lo_u32 v4, v4, v1
-; SI-NEXT:    v_mul_hi_u32 v4, v1, v4
-; SI-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
-; SI-NEXT:    v_mul_hi_u32 v1, v2, v1
-; SI-NEXT:    v_mul_lo_u32 v3, v1, v0
-; SI-NEXT:    v_add_i32_e32 v4, vcc, 1, v1
-; SI-NEXT:    v_sub_i32_e32 v2, vcc, v2, v3
-; SI-NEXT:    v_sub_i32_e32 v3, vcc, v2, v0
-; SI-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v0
-; SI-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
-; SI-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
-; SI-NEXT:    v_add_i32_e32 v3, vcc, 1, v1
-; SI-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v0
-; SI-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
+; SI-NEXT:    v_or_b32_e32 v1, v3, v1
+; SI-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; SI-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; SI-NEXT:    v_mul_f32_e32 v2, v1, v2
+; SI-NEXT:    v_trunc_f32_e32 v2, v2
+; SI-NEXT:    v_cvt_u32_f32_e32 v3, v2
+; SI-NEXT:    v_mad_f32 v1, -v2, v0, v1
+; SI-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; SI-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
 ; SI-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
 ;
 ; VI-LABEL: v_udiv_i24:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
-; VI-NEXT:    s_mov_b32 s3, 0xf000
-; VI-NEXT:    s_mov_b32 s2, -1
-; VI-NEXT:    s_mov_b32 s10, s2
-; VI-NEXT:    s_mov_b32 s11, s3
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
+; VI-NEXT:    s_mov_b32 s7, 0xf000
+; VI-NEXT:    s_mov_b32 s6, -1
+; VI-NEXT:    s_mov_b32 s10, s6
+; VI-NEXT:    s_mov_b32 s11, s7
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_mov_b32 s8, s6
-; VI-NEXT:    s_mov_b32 s9, s7
+; VI-NEXT:    s_mov_b32 s8, s2
+; VI-NEXT:    s_mov_b32 s9, s3
 ; VI-NEXT:    buffer_load_ubyte v0, off, s[8:11], 0 offset:6
 ; VI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0 offset:4
 ; VI-NEXT:    buffer_load_ubyte v2, off, s[8:11], 0 offset:2
 ; VI-NEXT:    buffer_load_ushort v3, off, s[8:11], 0
-; VI-NEXT:    s_mov_b32 s0, s4
-; VI-NEXT:    s_mov_b32 s1, s5
+; VI-NEXT:    s_mov_b32 s4, s0
+; VI-NEXT:    s_mov_b32 s5, s1
 ; VI-NEXT:    s_waitcnt vmcnt(3)
 ; VI-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; VI-NEXT:    s_waitcnt vmcnt(2)
 ; VI-NEXT:    v_or_b32_e32 v0, v1, v0
-; VI-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; VI-NEXT:    v_sub_u32_e32 v4, vcc, 0, v0
+; VI-NEXT:    v_cvt_f32_u32_e32 v0, v0
 ; VI-NEXT:    s_waitcnt vmcnt(1)
-; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; VI-NEXT:    v_rcp_iflag_f32_e32 v1, v1
+; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v2
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_or_b32_e32 v2, v3, v2
-; VI-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; VI-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; VI-NEXT:    v_mul_lo_u32 v4, v4, v1
-; VI-NEXT:    v_mul_hi_u32 v4, v1, v4
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v4
-; VI-NEXT:    v_mul_hi_u32 v1, v2, v1
-; VI-NEXT:    v_mul_lo_u32 v3, v1, v0
-; VI-NEXT:    v_add_u32_e32 v4, vcc, 1, v1
-; VI-NEXT:    v_sub_u32_e32 v2, vcc, v2, v3
-; VI-NEXT:    v_sub_u32_e32 v3, vcc, v2, v0
-; VI-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v0
-; VI-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
-; VI-NEXT:    v_add_u32_e32 v3, vcc, 1, v1
-; VI-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v1, v3, vcc
+; VI-NEXT:    v_or_b32_e32 v1, v3, v1
+; VI-NEXT:    v_cvt_f32_u32_e32 v1, v1
+; VI-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; VI-NEXT:    v_mul_f32_e32 v2, v1, v2
+; VI-NEXT:    v_trunc_f32_e32 v2, v2
+; VI-NEXT:    v_cvt_u32_f32_e32 v3, v2
+; VI-NEXT:    v_mad_f32 v1, -v2, v0, v1
+; VI-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; VI-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
 ; VI-NEXT:    v_and_b32_e32 v0, 0xffffff, v0
-; VI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
 ; VI-NEXT:    s_endpgm
 ;
 ; GCN-LABEL: v_udiv_i24:
@@ -1947,50 +1927,40 @@ define amdgpu_kernel void @v_udiv_i24(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_add_u32 s4, s2, 4
 ; GCN-NEXT:    s_addc_u32 s5, s3, 0
 ; GCN-NEXT:    s_add_u32 s6, s2, 2
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN-NEXT:    s_addc_u32 s7, s3, 0
-; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:    s_add_u32 s4, s2, 6
-; GCN-NEXT:    s_addc_u32 s5, s3, 0
-; GCN-NEXT:    v_mov_b32_e32 v2, s4
-; GCN-NEXT:    v_mov_b32_e32 v3, s5
-; GCN-NEXT:    flat_load_ubyte v4, v[2:3]
-; GCN-NEXT:    flat_load_ushort v5, v[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NEXT:    s_add_u32 s6, s2, 6
+; GCN-NEXT:    s_addc_u32 s7, s3, 0
 ; GCN-NEXT:    v_mov_b32_e32 v2, s6
-; GCN-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v3, s7
-; GCN-NEXT:    v_mov_b32_e32 v1, s3
-; GCN-NEXT:    flat_load_ubyte v2, v[2:3]
-; GCN-NEXT:    flat_load_ushort v0, v[0:1]
+; GCN-NEXT:    v_mov_b32_e32 v4, s4
+; GCN-NEXT:    v_mov_b32_e32 v5, s5
+; GCN-NEXT:    flat_load_ubyte v6, v[2:3]
+; GCN-NEXT:    flat_load_ushort v4, v[4:5]
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v3, s3
+; GCN-NEXT:    flat_load_ubyte v0, v[0:1]
+; GCN-NEXT:    flat_load_ushort v1, v[2:3]
 ; GCN-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
+; GCN-NEXT:    v_lshlrev_b32_e32 v2, 16, v6
 ; GCN-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NEXT:    v_or_b32_e32 v3, v5, v1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v3
-; GCN-NEXT:    v_sub_u32_e32 v4, vcc, 0, v3
+; GCN-NEXT:    v_or_b32_e32 v2, v4, v2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, v2
 ; GCN-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NEXT:    v_lshlrev_b32_e32 v2, 16, v2
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
+; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v2, v0, v2
-; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_lo_u32 v4, v4, v1
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GCN-NEXT:    v_add_u32_e32 v0, vcc, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v4, v2, v0
+; GCN-NEXT:    v_or_b32_e32 v0, v1, v0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v3, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v4, v2
 ; GCN-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-NEXT:    v_mul_lo_u32 v5, v4, v3
-; GCN-NEXT:    v_add_u32_e32 v6, vcc, 1, v4
-; GCN-NEXT:    v_sub_u32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_sub_u32_e32 v5, vcc, v2, v3
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v3
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
-; GCN-NEXT:    v_add_u32_e32 v5, vcc, 1, v4
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v3
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v5, vcc
+; GCN-NEXT:    v_mul_f32_e32 v4, v3, v4
+; GCN-NEXT:    v_trunc_f32_e32 v4, v4
+; GCN-NEXT:    v_cvt_u32_f32_e32 v5, v4
+; GCN-NEXT:    v_mad_f32 v3, -v4, v2, v3
+; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, v2
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
 ; GCN-NEXT:    v_and_b32_e32 v2, 0xffffff, v2
 ; GCN-NEXT:    flat_store_dword v[0:1], v2
 ; GCN-NEXT:    s_endpgm
@@ -2006,39 +1976,23 @@ define amdgpu_kernel void @v_udiv_i24(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GFX1030-NEXT:    global_load_ubyte v3, v0, s[2:3] offset:2
 ; GFX1030-NEXT:    global_load_ushort v4, v0, s[2:3]
 ; GFX1030-NEXT:    s_waitcnt vmcnt(3)
-; GFX1030-NEXT:    v_readfirstlane_b32 s2, v1
+; GFX1030-NEXT:    v_lshlrev_b32_e32 v1, 16, v1
 ; GFX1030-NEXT:    s_waitcnt vmcnt(2)
-; GFX1030-NEXT:    v_readfirstlane_b32 s3, v2
+; GFX1030-NEXT:    v_or_b32_e32 v1, v2, v1
 ; GFX1030-NEXT:    s_waitcnt vmcnt(1)
-; GFX1030-NEXT:    v_readfirstlane_b32 s4, v3
+; GFX1030-NEXT:    v_lshlrev_b32_e32 v2, 16, v3
+; GFX1030-NEXT:    v_cvt_f32_u32_e32 v1, v1
 ; GFX1030-NEXT:    s_waitcnt vmcnt(0)
-; GFX1030-NEXT:    v_readfirstlane_b32 s5, v4
-; GFX1030-NEXT:    s_lshl_b32 s2, s2, 16
-; GFX1030-NEXT:    s_or_b32 s2, s3, s2
-; GFX1030-NEXT:    s_lshl_b32 s4, s4, 16
-; GFX1030-NEXT:    v_cvt_f32_u32_e32 v1, s2
-; GFX1030-NEXT:    s_sub_i32 s6, 0, s2
-; GFX1030-NEXT:    s_or_b32 s4, s5, s4
-; GFX1030-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GFX1030-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; GFX1030-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX1030-NEXT:    v_readfirstlane_b32 s3, v1
-; GFX1030-NEXT:    s_mul_i32 s6, s6, s3
-; GFX1030-NEXT:    s_mul_hi_u32 s6, s3, s6
-; GFX1030-NEXT:    s_add_i32 s3, s3, s6
-; GFX1030-NEXT:    s_mul_hi_u32 s3, s4, s3
-; GFX1030-NEXT:    s_mul_i32 s5, s3, s2
-; GFX1030-NEXT:    s_sub_i32 s4, s4, s5
-; GFX1030-NEXT:    s_add_i32 s5, s3, 1
-; GFX1030-NEXT:    s_sub_i32 s6, s4, s2
-; GFX1030-NEXT:    s_cmp_ge_u32 s4, s2
-; GFX1030-NEXT:    s_cselect_b32 s3, s5, s3
-; GFX1030-NEXT:    s_cselect_b32 s4, s6, s4
-; GFX1030-NEXT:    s_add_i32 s5, s3, 1
-; GFX1030-NEXT:    s_cmp_ge_u32 s4, s2
-; GFX1030-NEXT:    s_cselect_b32 s2, s5, s3
-; GFX1030-NEXT:    s_and_b32 s2, s2, 0xffffff
-; GFX1030-NEXT:    v_mov_b32_e32 v1, s2
+; GFX1030-NEXT:    v_or_b32_e32 v2, v4, v2
+; GFX1030-NEXT:    v_rcp_iflag_f32_e32 v3, v1
+; GFX1030-NEXT:    v_cvt_f32_u32_e32 v2, v2
+; GFX1030-NEXT:    v_mul_f32_e32 v3, v2, v3
+; GFX1030-NEXT:    v_trunc_f32_e32 v3, v3
+; GFX1030-NEXT:    v_fma_f32 v2, -v3, v1, v2
+; GFX1030-NEXT:    v_cvt_u32_f32_e32 v3, v3
+; GFX1030-NEXT:    v_cmp_ge_f32_e64 vcc_lo, |v2|, v1
+; GFX1030-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
+; GFX1030-NEXT:    v_and_b32_e32 v1, 0xffffff, v1
 ; GFX1030-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX1030-NEXT:    s_endpgm
 ;
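
A hypothetical reduced sketch of a 24-bit unsigned division like the one the v_udiv_i24 checks above exercise; the pointer arguments and loads here are illustrative, not the actual test body. The quotient fits in 24 bits, which is what the 0xffffff mask in the regenerated output corresponds to.

    ; Hypothetical kernel; the operand loads stand in for however the real test builds its i24 values.
    define amdgpu_kernel void @udiv_i24_sketch(ptr addrspace(1) %out,
                                               ptr addrspace(1) %num.ptr,
                                               ptr addrspace(1) %den.ptr) {
      %num = load i24, ptr addrspace(1) %num.ptr
      %den = load i24, ptr addrspace(1) %den.ptr
      %q = udiv i24 %num, %den              ; 24-bit divide, expanded with the f32 rcp sequence above
      %q.ext = zext i24 %q to i32           ; corresponds to the v_and_b32 0xffffff in the checks
      store i32 %q.ext, ptr addrspace(1) %out
      ret void
    }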

diff  --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index bde3415c9d103..48b9c72ea6892 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -497,45 +497,73 @@ define i64 @v_test_udiv24_i64(i64 %x, i64 %y) {
 define amdgpu_kernel void @s_test_udiv32_i64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_udiv32_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s4, s[0:1], 0xe
-; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s8, s[0:1], 0xe
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_mov_b32 s4, s0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-NEXT:    s_sub_i32 s0, s3, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_udiv32_i64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xe
-; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s8, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s3
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_mov_b32 s4, s0
 ; GCN-IR-NEXT:    s_mov_b32 s5, s1
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-IR-NEXT:    s_sub_i32 s0, s3, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 32
@@ -548,51 +576,77 @@ define amdgpu_kernel void @s_test_udiv32_i64(ptr addrspace(1) %out, i64 %x, i64
 define amdgpu_kernel void @s_test_udiv31_i64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_udiv31_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s4, s[0:1], 0xe
-; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshr_b32 s2, s4, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    s_lshr_b32 s8, s2, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_lshr_b32 s2, s3, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s2
 ; GCN-NEXT:    s_mov_b32 s4, s0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
 ; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_udiv31_i64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xe
-; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_lshr_b32 s2, s4, 1
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-IR-NEXT:    s_lshr_b32 s8, s2, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
+; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_lshr_b32 s2, s3, 1
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s2
 ; GCN-IR-NEXT:    s_mov_b32 s4, s0
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
 ; GCN-IR-NEXT:    s_mov_b32 s5, s1
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-IR-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, 1, v0
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b64 vcc, -1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 33
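
A reduced sketch of the unsigned case the s_test_udiv32_i64 and s_test_udiv31_i64 checks above cover; the kernel name is made up. After the lshr at most 32 bits are significant, so the divide is performed with the 32-bit sequence and, as the v_mov_b32_e32 v1, 0 in the checks shows, the high half of the stored result is simply zero.

    ; Hypothetical reduced kernel mirroring the 32-bit variant above.
    define amdgpu_kernel void @udiv32_i64_sketch(ptr addrspace(1) %out, i64 %x, i64 %y) {
      %a = lshr i64 %x, 32                  ; at most 32 significant bits
      %b = lshr i64 %y, 32
      %q = udiv i64 %a, %b                  ; expanded as a 32-bit udiv; high 32 bits of the result are 0
      store i64 %q, ptr addrspace(1) %out
      ret void
    }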

diff  --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 56cd594fced7f..f35589853393c 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -413,52 +413,72 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 define amdgpu_kernel void @s_test_urem31_i64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_urem31_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dword s4, s[0:1], 0xe
+; GCN-NEXT:    s_load_dword s2, s[0:1], 0xe
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s8, s2, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s2, v0
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_mov_b32 s2, -1
-; GCN-NEXT:    s_lshr_b32 s4, s4, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-NEXT:    s_lshr_b32 s5, s3, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; GCN-NEXT:    s_lshr_b32 s2, s3, 1
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_urem31_i64:
 ; GCN-IR:       ; %bb.0:
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xe
+; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_lshr_b32 s8, s2, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s8
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s2, v0
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
-; GCN-IR-NEXT:    s_lshr_b32 s4, s4, 1
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-IR-NEXT:    s_lshr_b32 s5, s3, 1
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v0
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v1, v2
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-IR-NEXT:    v_mad_f32 v1, -v2, v0, v1
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, v0
+; GCN-IR-NEXT:    s_lshr_b32 s2, s3, 1
+; GCN-IR-NEXT:    s_mov_b32 s4, s0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-IR-NEXT:    s_mov_b32 s5, s1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
-; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-IR-NEXT:    s_mul_i32 s0, s0, s8
+; GCN-IR-NEXT:    s_sub_i32 s0, s2, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    s_sub_i32 s1, s0, s8
+; GCN-IR-NEXT:    s_cmp_ge_u32 s0, s8
+; GCN-IR-NEXT:    s_cselect_b32 s0, s1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = lshr i64 %x, 33
   %2 = lshr i64 %y, 33
@@ -472,39 +492,53 @@ define amdgpu_kernel void @s_test_urem31_v2i64(ptr addrspace(1) %out, <2 x i64>
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s2, s9, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    s_sub_i32 s3, 0, s2
+; GCN-NEXT:    s_lshr_b32 s4, s11, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GCN-NEXT:    s_lshr_b32 s3, s5, 1
+; GCN-NEXT:    s_lshr_b32 s5, s7, 1
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v2
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_readfirstlane_b32 s6, v0
+; GCN-NEXT:    s_mul_i32 s6, s6, s2
+; GCN-NEXT:    s_sub_i32 s3, s3, s6
+; GCN-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s3, s6, s3
+; GCN-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s6, s6, s3
+; GCN-NEXT:    s_sub_i32 s2, 0, s4
+; GCN-NEXT:    v_mul_lo_u32 v0, s2, v1
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, -1
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshr_b32 s4, s9, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-NEXT:    s_lshr_b32 s5, s5, 1
-; GCN-NEXT:    s_lshr_b32 s6, s7, 1
-; GCN-NEXT:    s_lshr_b32 s7, s11, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v0
-; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s7
-; GCN-NEXT:    v_cvt_f32_u32_e32 v5, s6
-; GCN-NEXT:    v_mul_f32_e32 v3, v2, v3
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v6, v4
-; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mad_f32 v2, -v3, v0, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, v5, v6
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-NEXT:    v_mad_f32 v2, -v2, v4, v5
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v4
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, s7
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s6, v2
-; GCN-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v2
+; GCN-NEXT:    v_mul_hi_u32 v0, v1, v0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT:    v_mul_hi_u32 v2, s5, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-NEXT:    v_readfirstlane_b32 s6, v2
+; GCN-NEXT:    s_mul_i32 s6, s6, s4
+; GCN-NEXT:    s_sub_i32 s5, s5, s6
+; GCN-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-NEXT:    s_cselect_b32 s5, s6, s5
+; GCN-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-NEXT:    s_cselect_b32 s4, s6, s5
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
@@ -512,39 +546,53 @@ define amdgpu_kernel void @s_test_urem31_v2i64(ptr addrspace(1) %out, <2 x i64>
 ; GCN-IR:       ; %bb.0:
 ; GCN-IR-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_lshr_b32 s2, s9, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-IR-NEXT:    s_sub_i32 s3, 0, s2
+; GCN-IR-NEXT:    s_lshr_b32 s4, s11, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GCN-IR-NEXT:    s_lshr_b32 s3, s5, 1
+; GCN-IR-NEXT:    s_lshr_b32 s5, s7, 1
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v2
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_readfirstlane_b32 s6, v0
+; GCN-IR-NEXT:    s_mul_i32 s6, s6, s2
+; GCN-IR-NEXT:    s_sub_i32 s3, s3, s6
+; GCN-IR-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-IR-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-IR-NEXT:    s_cselect_b32 s3, s6, s3
+; GCN-IR-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-IR-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-IR-NEXT:    s_cselect_b32 s6, s6, s3
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s4
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v1
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 1
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-IR-NEXT:    s_lshr_b32 s5, s5, 1
-; GCN-IR-NEXT:    s_lshr_b32 s6, s7, 1
-; GCN-IR-NEXT:    s_lshr_b32 s7, s11, 1
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v2, s5
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v3, v0
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v4, s7
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v5, s6
-; GCN-IR-NEXT:    v_mul_f32_e32 v3, v2, v3
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v6, v4
-; GCN-IR-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-IR-NEXT:    v_mad_f32 v2, -v3, v0, v2
-; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v5, v6
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-IR-NEXT:    v_mad_f32 v2, -v2, v4, v5
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, s7
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
-; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s6, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v2
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, v1, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s5, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-IR-NEXT:    v_readfirstlane_b32 s6, v2
+; GCN-IR-NEXT:    s_mul_i32 s6, s6, s4
+; GCN-IR-NEXT:    s_sub_i32 s5, s5, s6
+; GCN-IR-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-IR-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-IR-NEXT:    s_cselect_b32 s5, s6, s5
+; GCN-IR-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-IR-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, s5
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s4
 ; GCN-IR-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = lshr <2 x i64> %x, <i64 33, i64 33>
@@ -616,39 +664,53 @@ define amdgpu_kernel void @s_test_urem23_64_v2i64(ptr addrspace(1) %out, <2 x i6
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GCN-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-NEXT:    s_lshr_b32 s2, s9, 1
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    s_sub_i32 s3, 0, s2
+; GCN-NEXT:    s_lshr_b32 s4, s11, 9
+; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GCN-NEXT:    s_lshr_b32 s3, s5, 1
+; GCN-NEXT:    s_lshr_b32 s5, s7, 9
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v2
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_readfirstlane_b32 s6, v0
+; GCN-NEXT:    s_mul_i32 s6, s6, s2
+; GCN-NEXT:    s_sub_i32 s3, s3, s6
+; GCN-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s3, s6, s3
+; GCN-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s6, s6, s3
+; GCN-NEXT:    s_sub_i32 s2, 0, s4
+; GCN-NEXT:    v_mul_lo_u32 v0, s2, v1
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    s_mov_b32 s2, -1
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_lshr_b32 s4, s9, 1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-NEXT:    s_lshr_b32 s5, s5, 1
-; GCN-NEXT:    s_lshr_b32 s6, s7, 9
-; GCN-NEXT:    s_lshr_b32 s7, s11, 9
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s5
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v3, v0
-; GCN-NEXT:    v_cvt_f32_u32_e32 v4, s7
-; GCN-NEXT:    v_cvt_f32_u32_e32 v5, s6
-; GCN-NEXT:    v_mul_f32_e32 v3, v2, v3
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v6, v4
-; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_mad_f32 v2, -v3, v0, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
-; GCN-NEXT:    v_mul_f32_e32 v2, v5, v6
-; GCN-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-NEXT:    v_mad_f32 v2, -v2, v4, v5
-; GCN-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v4
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, s7
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
-; GCN-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s6, v2
-; GCN-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v2
+; GCN-NEXT:    v_mul_hi_u32 v0, v1, v0
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GCN-NEXT:    v_mul_hi_u32 v2, s5, v0
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-NEXT:    v_readfirstlane_b32 s6, v2
+; GCN-NEXT:    s_mul_i32 s6, s6, s4
+; GCN-NEXT:    s_sub_i32 s5, s5, s6
+; GCN-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-NEXT:    s_cselect_b32 s5, s6, s5
+; GCN-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-NEXT:    s_cselect_b32 s4, s6, s5
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
 ; GCN-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
@@ -656,39 +718,53 @@ define amdgpu_kernel void @s_test_urem23_64_v2i64(ptr addrspace(1) %out, <2 x i6
 ; GCN-IR:       ; %bb.0:
 ; GCN-IR-NEXT:    s_load_dwordx8 s[4:11], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
+; GCN-IR-NEXT:    s_lshr_b32 s2, s9, 1
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-IR-NEXT:    s_sub_i32 s3, 0, s2
+; GCN-IR-NEXT:    s_lshr_b32 s4, s11, 9
+; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v2, s4
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v2, v2
+; GCN-IR-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GCN-IR-NEXT:    s_lshr_b32 s3, s5, 1
+; GCN-IR-NEXT:    s_lshr_b32 s5, s7, 9
+; GCN-IR-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, s3, v0
+; GCN-IR-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v2
+; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-IR-NEXT:    v_readfirstlane_b32 s6, v0
+; GCN-IR-NEXT:    s_mul_i32 s6, s6, s2
+; GCN-IR-NEXT:    s_sub_i32 s3, s3, s6
+; GCN-IR-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-IR-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-IR-NEXT:    s_cselect_b32 s3, s6, s3
+; GCN-IR-NEXT:    s_sub_i32 s6, s3, s2
+; GCN-IR-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-IR-NEXT:    s_cselect_b32 s6, s6, s3
+; GCN-IR-NEXT:    s_sub_i32 s2, 0, s4
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v1
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 1
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-IR-NEXT:    s_lshr_b32 s5, s5, 1
-; GCN-IR-NEXT:    s_lshr_b32 s6, s7, 9
-; GCN-IR-NEXT:    s_lshr_b32 s7, s11, 9
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v2, s5
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v3, v0
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v4, s7
-; GCN-IR-NEXT:    v_cvt_f32_u32_e32 v5, s6
-; GCN-IR-NEXT:    v_mul_f32_e32 v3, v2, v3
-; GCN-IR-NEXT:    v_rcp_iflag_f32_e32 v6, v4
-; GCN-IR-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-IR-NEXT:    v_mad_f32 v2, -v3, v0, v2
-; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v0
-; GCN-IR-NEXT:    v_mul_f32_e32 v2, v5, v6
-; GCN-IR-NEXT:    v_trunc_f32_e32 v2, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v0, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_cvt_u32_f32_e32 v3, v2
-; GCN-IR-NEXT:    v_mad_f32 v2, -v2, v4, v5
-; GCN-IR-NEXT:    v_cmp_ge_f32_e64 vcc, |v2|, v4
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, s4
-; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, s7
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s5, v0
-; GCN-IR-NEXT:    v_and_b32_e32 v0, 0x7fffffff, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s6, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 0x7fffffff, v2
+; GCN-IR-NEXT:    v_mul_hi_u32 v0, v1, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s5, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, v1
+; GCN-IR-NEXT:    v_readfirstlane_b32 s6, v2
+; GCN-IR-NEXT:    s_mul_i32 s6, s6, s4
+; GCN-IR-NEXT:    s_sub_i32 s5, s5, s6
+; GCN-IR-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-IR-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-IR-NEXT:    s_cselect_b32 s5, s6, s5
+; GCN-IR-NEXT:    s_sub_i32 s6, s5, s4
+; GCN-IR-NEXT:    s_cmp_ge_u32 s5, s4
+; GCN-IR-NEXT:    s_cselect_b32 s4, s6, s5
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s4
 ; GCN-IR-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = lshr <2 x i64> %x, <i64 33, i64 41>
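
A reduced sketch of the vector case the s_test_urem23_64_v2i64 checks above cover; the kernel name is made up. Each lane keeps at most 31 and 23 significant bits after the lshr, and the regenerated output shows the scalar 32-bit reciprocal sequence applied to each element independently.

    ; Hypothetical reduced kernel mirroring the <2 x i64> test above.
    define amdgpu_kernel void @urem_v2i64_sketch(ptr addrspace(1) %out, <2 x i64> %x, <2 x i64> %y) {
      %a = lshr <2 x i64> %x, <i64 33, i64 41>   ; lanes keep 31 and 23 bits
      %b = lshr <2 x i64> %y, <i64 33, i64 41>
      %r = urem <2 x i64> %a, %b                 ; expanded per element with the 32-bit sequence
      store <2 x i64> %r, ptr addrspace(1) %out
      ret void
    }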