[llvm] [AMDGPU] Remove setcc by using add/sub carryout (PR #155255)

via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 16 07:27:10 PDT 2025


https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/155255

>From a52501594913e257a2201dbc967c04363e2cdcdf Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 25 Aug 2025 09:53:39 -0500
Subject: [PATCH 1/9] Remove setcc by using add/sub carryout

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  56 +++
 llvm/test/CodeGen/AMDGPU/addsub64_carry.ll    |  53 +--
 .../test/CodeGen/AMDGPU/carryout-selection.ll |  80 ++--
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            | 364 +++++++++---------
 llvm/test/CodeGen/AMDGPU/srem64.ll            | 312 ++++++++-------
 llvm/test/CodeGen/AMDGPU/uaddsat.ll           |  47 +--
 llvm/test/CodeGen/AMDGPU/udiv64.ll            | 306 ++++++++-------
 llvm/test/CodeGen/AMDGPU/urem64.ll            | 201 +++++-----
 llvm/test/CodeGen/AMDGPU/usubsat.ll           |  54 +--
 9 files changed, 736 insertions(+), 737 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 2a977247bc2cb..1f56dc1a84460 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16292,6 +16292,62 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
     }
   }
 
+  // Eliminate setcc by using the carryout from add/sub instructions.
+
+  // X = ADD i64 Y, Z          Xlo = UADDO       i32 Ylo, Zlo
+  // setcc X ult Y     ->      Xhi = UADDO_CARRY i32 Yhi, Zhi
+  // similarly for subtraction
+
+  // X = ADD i64 Y, 1          Xlo = UADDO       i32 Ylo, 1
+  // setcc X eq 0      ->      Xhi = UADDO_CARRY i32 Yhi, 0
+
+  // Don't split a 64-bit add/sub into two 32-bit add/sub instructions for
+  // non-divergent operations.  This can result in lo/hi 32-bit operations
+  // being done in SGPR and VGPR with additional operations being needed
+  // to move operands and/or generate the intermediate carry.
+  if (VT == MVT::i64 && N->isDivergent() &&
+      ((((LHS.getOpcode() == ISD::ADD && CC == ISD::SETULT) ||
+         (LHS.getOpcode() == ISD::SUB && CC == ISD::SETUGT)) &&
+        LHS.getOperand(0) == RHS) ||
+       (LHS.getOpcode() == ISD::ADD && CC == ISD::SETEQ && CRHS &&
+        CRHS->isZero() && dyn_cast<ConstantSDNode>(LHS.getOperand(1)) &&
+        dyn_cast<ConstantSDNode>(LHS.getOperand(1))->isOne()))) {
+    EVT TargetType = MVT::i32;
+    EVT CarryVT = MVT::i1;
+    const SDValue One = DAG.getConstant(1, SL, TargetType);
+    bool IsAdd = LHS.getOpcode() == ISD::ADD;
+
+    SDValue Op0 = LHS.getOperand(0);
+    SDValue Op1 = LHS.getOperand(1);
+
+    SDValue Op0Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op0);
+    SDValue Op1Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op1);
+
+    SDValue Op0Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op0, One);
+    SDValue Op1Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op1, One);
+
+    SDValue NodeLo =
+        DAG.getNode(IsAdd ? ISD::UADDO : ISD::USUBO, SL,
+                    DAG.getVTList(TargetType, CarryVT), {Op0Lo, Op1Lo});
+
+    SDValue CarryInHi = SDValue(NodeLo.getNode(), 1);
+    SDValue NodeHi = DAG.getNode(IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY,
+                                 SL, DAG.getVTList(TargetType, CarryVT),
+                                 {Op0Hi, Op1Hi, CarryInHi});
+
+    SDValue ResultLo = SDValue(NodeLo.getNode(), 0);
+    SDValue ResultHi = SDValue(NodeHi.getNode(), 0);
+
+    EVT ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
+    SDValue JoinedResult =
+        DAG.getBuildVector(ConcatType, SL, {ResultLo, ResultHi});
+
+    SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, JoinedResult);
+    SDValue Overflow = SDValue(NodeHi.getNode(), 1);
+    DCI.CombineTo(LHS.getNode(), Result);
+    return Overflow;
+  }
+
   if (VT != MVT::f32 && VT != MVT::f64 &&
       (!Subtarget->has16BitInsts() || VT != MVT::f16))
     return SDValue();
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index d326966e849e2..397835972e4d4 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -17,12 +17,9 @@ define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
 ; CHECK-LABEL: v_add64_32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v5, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
-; CHECK-NEXT:    v_mov_b32_e32 v0, v5
+; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
 ; CHECK-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
-; CHECK-NEXT:    v_mov_b32_e32 v1, v6
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -38,16 +35,14 @@ define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_add_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_add_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT:    v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_addc_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -63,16 +58,14 @@ define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_sub_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_sub_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_subb_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -87,10 +80,9 @@ define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_uadd_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -109,7 +101,6 @@ define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, 1, v0
 ; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
@@ -147,10 +138,9 @@ define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_p1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v0
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, 1, v0
+; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -167,10 +157,9 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_n1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, 1, v0
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, -1, v0
+; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, -1, v1, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index b71885b54b5a2..1158d73c0c152 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -841,7 +841,7 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vuaddo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_ADD_U64_PSEUDO
+; GCN-ISEL: V_ADD_CO_U32_e64
 
 define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vuaddo64:
@@ -854,9 +854,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_add_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[0:1]
 ; CISI-NEXT:    s_mov_b32 s5, s1
+; CISI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -876,7 +875,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_add_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -894,7 +892,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -909,8 +906,7 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_add_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -923,9 +919,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -938,9 +933,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_add_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -955,10 +949,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -969,16 +962,17 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
@@ -1821,7 +1815,7 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vusubo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_SUB_U64_PSEUDO
+; GCN-ISEL: V_SUBB_U32_e64
 
 define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vusubo64:
@@ -1834,9 +1828,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
 ; CISI-NEXT:    s_mov_b32 s5, s1
+; CISI-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -1856,7 +1849,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_sub_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -1874,7 +1866,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -1889,8 +1880,7 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_sub_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -1903,9 +1893,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -1918,9 +1907,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_sub_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -1935,10 +1923,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -1949,16 +1936,17 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_sub_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_sub_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 5e76c7d7c734f..8cd9c13fb3ac5 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -356,25 +356,25 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_sdiv:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v12, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v13
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v13
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v11, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v11
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v11
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v6
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v7
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
@@ -383,70 +383,69 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v14, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v13
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v11
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v6, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[6:7], v14
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v4, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v4, v9
+; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v16, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v17, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v15, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v11, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v13, v12
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v4, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
@@ -1412,17 +1411,17 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1430,69 +1429,68 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 58, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 24, %x
   ret i64 %result
@@ -1605,17 +1603,17 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1624,70 +1622,69 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 32768, %x
   ret i64 %result
@@ -1707,20 +1704,20 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v8, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e64 v0, s[4:5], 32, v0
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v5
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v0, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v0, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v6
 ; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v8
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1730,15 +1727,15 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 63, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
@@ -1751,40 +1748,39 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s10, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v6, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v4, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v7, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v3, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v2, v0
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v8
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v9
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, 32768
   ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index c7b690fbd4a21..a1277c7e4778b 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -333,12 +333,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_srem:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v14
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v14, vcc
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v4
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
@@ -346,12 +346,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -360,7 +360,7 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v14
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
@@ -368,54 +368,53 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v6, v11
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v16, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v17, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -429,10 +428,10 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v15
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v15, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, %y
   ret i64 %result
@@ -1538,9 +1537,9 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1554,53 +1553,52 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 58, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1729,9 +1727,9 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1746,54 +1744,53 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1828,20 +1825,20 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1852,14 +1849,14 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
@@ -1872,31 +1869,30 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1905,10 +1901,10 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 15
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, 32768
   ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
index 923017400adb1..7f89581d00fde 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -693,52 +693,47 @@ define i64 @v_uaddsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_uaddsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_uaddsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_uaddsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_uaddsat_i64:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_uaddsat_i64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index bf1f6980fe25a..6e502501be838 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -313,19 +313,19 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v14, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v15, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[6:7], v14, v15
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[6:7]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[8:9]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
@@ -333,55 +333,54 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v8
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v10
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, v6, v9
+; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v10, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v11, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v6, v0
@@ -1065,12 +1064,12 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffd0, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[4:5]
@@ -1080,55 +1079,54 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v8
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1155,13 +1153,13 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 48, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1169,15 +1167,15 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
@@ -1190,31 +1188,30 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB10_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB10_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB10_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1355,13 +1352,13 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 59, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1369,51 +1366,50 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffc4, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 24, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index c4d928185d8f4..eb283c33ea854 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -322,12 +322,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -343,54 +343,53 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, v6, v11
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v15, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -1164,8 +1163,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1180,54 +1179,53 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB8_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1260,8 +1258,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1275,14 +1273,14 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
@@ -1295,31 +1293,30 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
diff --git a/llvm/test/CodeGen/AMDGPU/usubsat.ll b/llvm/test/CodeGen/AMDGPU/usubsat.ll
index 90491a07289a0..3ddb2f02c48fe 100644
--- a/llvm/test/CodeGen/AMDGPU/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubsat.ll
@@ -730,52 +730,38 @@ define i64 @v_usubsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_usubsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_usubsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_usubsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_usubsat_i64:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_usubsat_i64:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_usubsat_i64:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10PLUS-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc_lo
+; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
 }

>From fd91754550d9adb13b2e257419d0c22c14d6347e Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 28 Aug 2025 10:09:37 -0500
Subject: [PATCH 2/9] Get hi-half cleanly

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1f56dc1a84460..a6e7035569def 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16314,7 +16314,6 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
         dyn_cast<ConstantSDNode>(LHS.getOperand(1))->isOne()))) {
     EVT TargetType = MVT::i32;
     EVT CarryVT = MVT::i1;
-    const SDValue One = DAG.getConstant(1, SL, TargetType);
     bool IsAdd = LHS.getOpcode() == ISD::ADD;
 
     SDValue Op0 = LHS.getOperand(0);
@@ -16323,8 +16322,8 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
     SDValue Op0Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op0);
     SDValue Op1Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op1);
 
-    SDValue Op0Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op0, One);
-    SDValue Op1Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op1, One);
+    SDValue Op0Hi = getHiHalf64(Op0, DAG);
+    SDValue Op1Hi = getHiHalf64(Op1, DAG);
 
     SDValue NodeLo =
         DAG.getNode(IsAdd ? ISD::UADDO : ISD::USUBO, SL,

>From 60cff7f65edb579dc2d24cc92f7eee1806536982 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 28 Aug 2025 14:43:08 -0500
Subject: [PATCH 3/9] Use sd_match.  Make comment clearer.

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a6e7035569def..a88202c0278bb 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16294,24 +16294,24 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
 
   // Eliminate setcc by using carryout from add/sub instruction
 
-  // X = ADD i64 Y, Z          Xlo = UADDO       i32 Ylo, Zlo
-  // setcc X ult Y     ->      XHi = UADDO_CARRY i32 Yhi, Zhi
+  // LHS = ADD i64 RHS, Z          LHSlo = UADDO       i32 RHSlo, Zlo
+  // setcc LHS ult RHS     ->      LHSHi = UADDO_CARRY i32 RHShi, Zhi
   // similarly for subtraction
 
-  // X = ADD i64 Y, 1          Xlo = UADDO       i32 Ylo, 1
-  // setcc X eq 0      ->      XHi = UADDO_CARRY i32 Yhi, 0
+  // LHS = ADD i64 Y, 1            LHSlo = UADDO       i32 Ylo, 1
+  // setcc LHS eq 0        ->      LHSHi = UADDO_CARRY i32 Yhi, 0
 
   // Don't split a 64-bit add/sub into two 32-bit add/sub instructions for
   // non-divergent operations.  This can result in lo/hi 32-bit operations
   // being done in SGPR and VGPR with additional operations being needed
   // to move operands and/or generate the intermediate carry.
   if (VT == MVT::i64 && N->isDivergent() &&
-      ((((LHS.getOpcode() == ISD::ADD && CC == ISD::SETULT) ||
-         (LHS.getOpcode() == ISD::SUB && CC == ISD::SETUGT)) &&
-        LHS.getOperand(0) == RHS) ||
-       (LHS.getOpcode() == ISD::ADD && CC == ISD::SETEQ && CRHS &&
-        CRHS->isZero() && dyn_cast<ConstantSDNode>(LHS.getOperand(1)) &&
-        dyn_cast<ConstantSDNode>(LHS.getOperand(1))->isOne()))) {
+      ((CC == ISD::SETULT &&
+        sd_match(LHS, m_Add(m_Specific(RHS), m_Value()))) ||
+       (CC == ISD::SETUGT &&
+        sd_match(LHS, m_Sub(m_Specific(RHS), m_Value()))) ||
+       (CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
+        sd_match(LHS, m_Add(m_Value(), m_One()))))) {
     EVT TargetType = MVT::i32;
     EVT CarryVT = MVT::i1;
     bool IsAdd = LHS.getOpcode() == ISD::ADD;

>From e6b0fe65f76cfe80648ac1eb2297361d7e841e57 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 8 Sep 2025 06:54:06 -0500
Subject: [PATCH 4/9] Use getValue

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a88202c0278bb..9ff3fb11d50e9 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16329,20 +16329,20 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
         DAG.getNode(IsAdd ? ISD::UADDO : ISD::USUBO, SL,
                     DAG.getVTList(TargetType, CarryVT), {Op0Lo, Op1Lo});
 
-    SDValue CarryInHi = SDValue(NodeLo.getNode(), 1);
+    SDValue CarryInHi = NodeLo.getValue(1);
     SDValue NodeHi = DAG.getNode(IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY,
                                  SL, DAG.getVTList(TargetType, CarryVT),
                                  {Op0Hi, Op1Hi, CarryInHi});
 
-    SDValue ResultLo = SDValue(NodeLo.getNode(), 0);
-    SDValue ResultHi = SDValue(NodeHi.getNode(), 0);
+    SDValue ResultLo = NodeLo.getValue(0);
+    SDValue ResultHi = NodeHi.getValue(0);
 
     EVT ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
     SDValue JoinedResult =
         DAG.getBuildVector(ConcatType, SL, {ResultLo, ResultHi});
 
     SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, JoinedResult);
-    SDValue Overflow = SDValue(NodeHi.getNode(), 1);
+    SDValue Overflow = NodeHi.getValue(1);
     DCI.CombineTo(LHS.getNode(), Result);
     return Overflow;
   }

>From 46ae930f73caa0b4eddd554e23fb8b41ba0adc5b Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 8 Sep 2025 07:26:20 -0500
Subject: [PATCH 5/9] Use explicit type

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 9ff3fb11d50e9..ce91fc24e7df2 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16337,9 +16337,8 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
     SDValue ResultLo = NodeLo.getValue(0);
     SDValue ResultHi = NodeHi.getValue(0);
 
-    EVT ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
     SDValue JoinedResult =
-        DAG.getBuildVector(ConcatType, SL, {ResultLo, ResultHi});
+        DAG.getBuildVector(MVT::v2i32, SL, {ResultLo, ResultHi});
 
     SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, JoinedResult);
     SDValue Overflow = NodeHi.getValue(1);

>From 8cbfe2b32c6b8d3fad3e857047ed5404e098ed9e Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 8 Sep 2025 18:12:19 -0500
Subject: [PATCH 6/9] Update new tests

Signed-off-by: John Lu <John.Lu at amd.com>
---
 .../test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll | 98 ++++++++-----------
 .../CodeGen/AMDGPU/a-v-global-atomicrmw.ll    | 24 ++---
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            | 14 +--
 llvm/test/CodeGen/AMDGPU/srem64.ll            | 14 +--
 llvm/test/CodeGen/AMDGPU/udiv64.ll            | 14 +--
 llvm/test/CodeGen/AMDGPU/urem64.ll            | 14 +--
 6 files changed, 78 insertions(+), 100 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index 7b33374453010..4ec218bd9b8b3 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -8757,9 +8757,8 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8778,20 +8777,19 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB113_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v4, -1, v4, vcc
-; GFX90A-NEXT:    buffer_load_dword v0, v4, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v4, vcc
+; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
+; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v6
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen offset:4
-; GFX90A-NEXT:    buffer_store_dword v2, v4, s[0:3], 0 offen
+; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v7, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8825,10 +8823,9 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8854,11 +8851,11 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 v4, v[2:3], off
 ; GFX950-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    s_or_b64 exec, exec, s[0:1]
@@ -8898,9 +8895,8 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -8916,18 +8912,17 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB114_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v6, -1, v0, vcc
-; GFX90A-NEXT:    buffer_load_dword v4, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v0, vcc
+; GFX90A-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v5, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v4, v2
+; GFX90A-NEXT:    v_sub_co_u32_e32 v1, vcc, v4, v2
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v5, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_subb_co_u32_e32 v2, vcc, v5, v3, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB114_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8960,10 +8955,9 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -8986,7 +8980,6 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -17062,9 +17055,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17083,20 +17075,19 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX90A-NEXT:    s_cselect_b32 s4, s4, -1
-; GFX90A-NEXT:    v_mov_b32_e32 v6, s4
-; GFX90A-NEXT:    buffer_load_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_mov_b32_e32 v0, s4
+; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
+; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
-; GFX90A-NEXT:    buffer_store_dword v2, v6, s[0:3], 0 offen
+; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    ;;#ASMSTART
 ; GFX90A-NEXT:    ; use a[0:1]
@@ -17129,10 +17120,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17156,11 +17146,11 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 off, v[2:3], s0
 ; GFX950-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    ;;#ASMSTART
@@ -17199,9 +17189,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17224,7 +17213,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen
@@ -17260,10 +17248,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17284,7 +17271,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
index c98fff96d7b8a..34a4899123749 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
@@ -5804,9 +5804,8 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5839,10 +5838,9 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5880,9 +5878,8 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -5911,10 +5908,9 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -11573,9 +11569,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11609,10 +11604,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11651,9 +11645,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -11683,10 +11676,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 8cd9c13fb3ac5..18b6138f02109 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -1738,14 +1738,14 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[4:5], v[4:5], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index a1277c7e4778b..5db2916bff36a 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -1859,14 +1859,14 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 6e502501be838..f144d36589894 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -1178,14 +1178,14 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index eb283c33ea854..15a940f1b1dee 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -1283,14 +1283,14 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1

>From aa23e403f1b62785881daf1371240230f16a383f Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 10 Sep 2025 17:40:19 -0500
Subject: [PATCH 7/9] Calc IsVALU correctly during UADDO/USUBO selection

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp |   14 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   13 +-
 .../AMDGPU/amdgpu-codegenprepare-idiv.ll      | 3170 ++++++++--------
 .../test/CodeGen/AMDGPU/carryout-selection.ll | 1815 +++++----
 .../expand-scalar-carry-out-select-user.ll    |   61 +-
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            |  492 +--
 llvm/test/CodeGen/AMDGPU/srem.ll              | 3329 +++++++++--------
 llvm/test/CodeGen/AMDGPU/srem64.ll            |  745 ++--
 llvm/test/CodeGen/AMDGPU/udiv64.ll            |  209 +-
 llvm/test/CodeGen/AMDGPU/urem64.ll            |  469 ++-
 llvm/test/CodeGen/AMDGPU/wave32.ll            |  514 ++-
 11 files changed, 5701 insertions(+), 5130 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 3785d0f7f2688..fe3bff0cbaa8b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1089,9 +1089,14 @@ void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
   for (SDNode::user_iterator UI = N->user_begin(), E = N->user_end(); UI != E;
        ++UI)
     if (UI.getUse().getResNo() == 1) {
-      if ((IsAdd && (UI->getOpcode() != ISD::UADDO_CARRY)) ||
-          (!IsAdd && (UI->getOpcode() != ISD::USUBO_CARRY))) {
-        IsVALU = true;
+      if (UI->isMachineOpcode()) {
+        if (UI->getMachineOpcode() !=
+            (IsAdd ? AMDGPU::S_ADD_CO_PSEUDO : AMDGPU::S_SUB_CO_PSEUDO))
+          IsVALU = true;
+        break;
+      } else {
+        if (UI->getOpcode() != (IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY))
+          IsVALU = true;
         break;
       }
     }
@@ -1104,8 +1109,7 @@ void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
         {N->getOperand(0), N->getOperand(1),
          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
   } else {
-    unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
-                                                : AMDGPU::S_USUBO_PSEUDO;
+    unsigned Opc = IsAdd ? AMDGPU::S_UADDO_PSEUDO : AMDGPU::S_USUBO_PSEUDO;
 
     CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                          {N->getOperand(0), N->getOperand(1)});
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index ce91fc24e7df2..504649173f977 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5810,6 +5810,9 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     return lowerWaveReduce(MI, *BB, *getSubtarget(), AMDGPU::S_XOR_B64);
   case AMDGPU::S_UADDO_PSEUDO:
   case AMDGPU::S_USUBO_PSEUDO: {
+    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
+    const SIRegisterInfo *TRI = ST.getRegisterInfo();
     const DebugLoc &DL = MI.getDebugLoc();
     MachineOperand &Dest0 = MI.getOperand(0);
     MachineOperand &Dest1 = MI.getOperand(1);
@@ -5825,9 +5828,13 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
         .add(Src1);
     // clang-format on
 
-    BuildMI(*BB, MI, DL, TII->get(AMDGPU::S_CSELECT_B64), Dest1.getReg())
-        .addImm(1)
-        .addImm(0);
+    const TargetRegisterClass *Dest1RC = MRI.getRegClass(Dest1.getReg());
+    unsigned Dest1Size = TRI->getRegSizeInBits(*Dest1RC);
+    assert(Dest1Size == 64 || Dest1Size == 32);
+    unsigned SelOpc =
+        (Dest1Size == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
+
+    BuildMI(*BB, MI, DL, TII->get(SelOpc), Dest1.getReg()).addImm(1).addImm(0);
 
     MI.eraseFromParent();
     return BB;
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index b2dcd77274989..e68353e5223fb 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -7792,8 +7792,9 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-LABEL: sdiv_i64_pow2_shl_denom:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xd
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    s_lshl_b64 s[0:1], 0x1000, s0
 ; GFX6-NEXT:    s_ashr_i32 s8, s1, 31
@@ -7803,143 +7804,175 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_xor_b64 s[10:11], s[0:1], s[8:9]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s10
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s11
-; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT:    s_sub_u32 s4, 0, s10
-; GFX6-NEXT:    s_subb_u32 s5, 0, s11
+; GFX6-NEXT:    s_sub_u32 s12, 0, s10
+; GFX6-NEXT:    s_subb_u32 s13, 0, s11
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_ashr_i32 s12, s3, 31
-; GFX6-NEXT:    s_add_u32 s2, s2, s12
-; GFX6-NEXT:    s_mov_b32 s13, s12
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT:    s_addc_u32 s3, s3, s12
-; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[12:13]
-; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GFX6-NEXT:    v_mul_lo_u32 v5, s5, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s4, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX6-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s5, v0
-; GFX6-NEXT:    s_mov_b32 s5, s1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s4, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GFX6-NEXT:    v_mul_hi_u32 v4, s2, v1
-; GFX6-NEXT:    v_mul_hi_u32 v5, s3, v1
-; GFX6-NEXT:    v_mul_lo_u32 v1, s3, v1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s3, v0
-; GFX6-NEXT:    s_mov_b32 s4, s0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s10, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s10, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s11, v0
-; GFX6-NEXT:    v_mov_b32_e32 v5, s11
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s10, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s3, v2
-; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s2, v3
-; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GFX6-NEXT:    v_subrev_i32_e64 v5, s[0:1], s10, v3
-; GFX6-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s11, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s10, v5
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GFX6-NEXT:    v_add_i32_e64 v5, s[0:1], 1, v0
-; GFX6-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GFX6-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v0
-; GFX6-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v4, v5, v7, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, v6, v8, s[0:1]
-; GFX6-NEXT:    v_mov_b32_e32 v6, s3
-; GFX6-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s11, v2
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s10, v3
-; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s11, v2
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
-; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; GFX6-NEXT:    s_xor_b64 s[0:1], s[12:13], s[8:9]
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; GFX6-NEXT:    v_xor_b32_e32 v0, s0, v0
-; GFX6-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    s_mul_i32 s1, s12, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
+; GFX6-NEXT:    s_mul_i32 s15, s13, s0
+; GFX6-NEXT:    s_mul_i32 s16, s12, s0
+; GFX6-NEXT:    s_add_i32 s1, s17, s1
+; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s16
+; GFX6-NEXT:    s_add_i32 s1, s1, s15
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s1
+; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v3
+; GFX6-NEXT:    s_mul_i32 s17, s0, s1
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s1
+; GFX6-NEXT:    s_add_u32 s15, s15, s17
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v0
+; GFX6-NEXT:    s_addc_u32 s17, 0, s17
+; GFX6-NEXT:    s_mul_i32 s16, s14, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v4
+; GFX6-NEXT:    s_add_u32 s15, s15, s16
+; GFX6-NEXT:    s_addc_u32 s15, s17, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    s_addc_u32 s16, s16, 0
+; GFX6-NEXT:    s_mul_i32 s1, s14, s1
+; GFX6-NEXT:    s_add_u32 s1, s15, s1
+; GFX6-NEXT:    s_addc_u32 s15, 0, s16
+; GFX6-NEXT:    s_add_i32 s16, s0, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s16
+; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
+; GFX6-NEXT:    s_addc_u32 s14, s14, s15
+; GFX6-NEXT:    s_mul_i32 s0, s12, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX6-NEXT:    s_add_i32 s0, s1, s0
+; GFX6-NEXT:    s_mul_i32 s13, s13, s16
+; GFX6-NEXT:    s_mul_i32 s1, s12, s16
+; GFX6-NEXT:    s_add_i32 s0, s0, s13
 ; GFX6-NEXT:    v_mov_b32_e32 v2, s1
-; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_mul_hi_u32 v3, s14, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s16, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s14, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s16, v0
+; GFX6-NEXT:    s_mul_i32 s13, s16, s0
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
+; GFX6-NEXT:    s_add_u32 s13, s17, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
+; GFX6-NEXT:    s_mul_i32 s1, s14, s1
+; GFX6-NEXT:    s_addc_u32 s15, 0, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
+; GFX6-NEXT:    s_add_u32 s1, s13, s1
+; GFX6-NEXT:    s_addc_u32 s1, s15, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    s_addc_u32 s12, s12, 0
+; GFX6-NEXT:    s_mul_i32 s0, s14, s0
+; GFX6-NEXT:    s_add_u32 s0, s1, s0
+; GFX6-NEXT:    s_addc_u32 s12, 0, s12
+; GFX6-NEXT:    s_add_i32 s15, s16, s0
+; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
+; GFX6-NEXT:    s_addc_u32 s14, s14, s12
+; GFX6-NEXT:    s_ashr_i32 s12, s7, 31
+; GFX6-NEXT:    s_add_u32 s0, s6, s12
+; GFX6-NEXT:    s_mov_b32 s13, s12
+; GFX6-NEXT:    s_addc_u32 s1, s7, s12
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[12:13]
+; GFX6-NEXT:    v_mov_b32_e32 v0, s14
+; GFX6-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, s15
+; GFX6-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GFX6-NEXT:    s_mov_b32 s0, s4
+; GFX6-NEXT:    v_readfirstlane_b32 s4, v1
+; GFX6-NEXT:    v_mul_hi_u32 v1, s7, v2
+; GFX6-NEXT:    s_mul_i32 s1, s6, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v3
+; GFX6-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT:    s_add_u32 s1, s16, s1
+; GFX6-NEXT:    s_addc_u32 s4, 0, s4
+; GFX6-NEXT:    s_mul_i32 s15, s7, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    s_add_u32 s1, s1, s15
+; GFX6-NEXT:    s_addc_u32 s1, s4, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX6-NEXT:    s_addc_u32 s4, s4, 0
+; GFX6-NEXT:    s_mul_i32 s14, s7, s14
+; GFX6-NEXT:    s_add_u32 s14, s1, s14
+; GFX6-NEXT:    v_mov_b32_e32 v0, s14
+; GFX6-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GFX6-NEXT:    s_addc_u32 s15, 0, s4
+; GFX6-NEXT:    s_mov_b32 s1, s5
+; GFX6-NEXT:    s_mul_i32 s4, s10, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX6-NEXT:    s_add_i32 s4, s5, s4
+; GFX6-NEXT:    s_mul_i32 s5, s11, s14
+; GFX6-NEXT:    s_add_i32 s16, s4, s5
+; GFX6-NEXT:    s_sub_i32 s17, s7, s16
+; GFX6-NEXT:    s_mul_i32 s4, s10, s14
+; GFX6-NEXT:    s_sub_i32 s6, s6, s4
+; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX6-NEXT:    s_or_b32 s18, s4, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s18, 0
+; GFX6-NEXT:    s_subb_u32 s17, s17, s11
+; GFX6-NEXT:    s_sub_i32 s19, s6, s10
+; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX6-NEXT:    s_or_b32 s4, s4, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX6-NEXT:    s_subb_u32 s4, s17, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s4, s11
+; GFX6-NEXT:    s_cselect_b32 s5, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s19, s10
+; GFX6-NEXT:    s_cselect_b32 s17, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s4, s11
+; GFX6-NEXT:    s_cselect_b32 s4, s17, s5
+; GFX6-NEXT:    s_add_u32 s5, s14, 1
+; GFX6-NEXT:    s_addc_u32 s17, s15, 0
+; GFX6-NEXT:    s_add_u32 s19, s14, 2
+; GFX6-NEXT:    s_addc_u32 s20, s15, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX6-NEXT:    s_cselect_b32 s4, s19, s5
+; GFX6-NEXT:    s_cselect_b32 s5, s20, s17
+; GFX6-NEXT:    s_cmp_lg_u32 s18, 0
+; GFX6-NEXT:    s_subb_u32 s7, s7, s16
+; GFX6-NEXT:    s_cmp_ge_u32 s7, s11
+; GFX6-NEXT:    s_cselect_b32 s16, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s6, s10
+; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s7, s11
+; GFX6-NEXT:    s_cselect_b32 s6, s6, s16
+; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX6-NEXT:    s_cselect_b32 s5, s5, s15
+; GFX6-NEXT:    s_cselect_b32 s4, s4, s14
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[12:13], s[8:9]
+; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
+; GFX6-NEXT:    s_sub_u32 s4, s4, s6
+; GFX6-NEXT:    s_subb_u32 s5, s5, s7
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    v_mov_b32_e32 v1, s5
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: sdiv_i64_pow2_shl_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_lshl_b64 s[0:1], 0x1000, s0
-; GFX9-NEXT:    s_ashr_i32 s2, s1, 31
-; GFX9-NEXT:    s_add_u32 s0, s0, s2
-; GFX9-NEXT:    s_mov_b32 s3, s2
-; GFX9-NEXT:    s_addc_u32 s1, s1, s2
-; GFX9-NEXT:    s_xor_b64 s[6:7], s[0:1], s[2:3]
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
-; GFX9-NEXT:    s_sub_u32 s0, 0, s6
-; GFX9-NEXT:    s_subb_u32 s1, 0, s7
+; GFX9-NEXT:    s_ashr_i32 s6, s1, 31
+; GFX9-NEXT:    s_add_u32 s0, s0, s6
+; GFX9-NEXT:    s_mov_b32 s7, s6
+; GFX9-NEXT:    s_addc_u32 s1, s1, s6
+; GFX9-NEXT:    s_xor_b64 s[8:9], s[0:1], s[6:7]
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT:    s_sub_u32 s10, 0, s8
+; GFX9-NEXT:    s_subb_u32 s11, 0, s9
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
@@ -7949,130 +7982,122 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_madmk_f32 v1, v2, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s4, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s5, v1
-; GFX9-NEXT:    s_mul_i32 s12, s0, s4
-; GFX9-NEXT:    s_mul_hi_u32 s14, s0, s5
-; GFX9-NEXT:    s_mul_i32 s13, s1, s5
-; GFX9-NEXT:    s_add_i32 s12, s14, s12
-; GFX9-NEXT:    s_mul_i32 s15, s0, s5
-; GFX9-NEXT:    s_add_i32 s12, s12, s13
-; GFX9-NEXT:    s_mul_hi_u32 s14, s5, s15
-; GFX9-NEXT:    s_mul_hi_u32 s13, s5, s12
-; GFX9-NEXT:    s_mul_i32 s5, s5, s12
-; GFX9-NEXT:    s_add_u32 s5, s14, s5
+; GFX9-NEXT:    v_readfirstlane_b32 s12, v2
+; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
+; GFX9-NEXT:    s_mul_i32 s5, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s4
+; GFX9-NEXT:    s_mul_i32 s13, s11, s4
+; GFX9-NEXT:    s_add_i32 s5, s14, s5
+; GFX9-NEXT:    s_mul_i32 s15, s10, s4
+; GFX9-NEXT:    s_add_i32 s5, s5, s13
+; GFX9-NEXT:    s_mul_hi_u32 s14, s4, s15
+; GFX9-NEXT:    s_mul_i32 s16, s4, s5
+; GFX9-NEXT:    s_mul_hi_u32 s13, s4, s5
+; GFX9-NEXT:    s_add_u32 s14, s14, s16
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s13
-; GFX9-NEXT:    s_mul_hi_u32 s16, s4, s15
-; GFX9-NEXT:    s_mul_i32 s15, s4, s15
-; GFX9-NEXT:    s_add_u32 s5, s5, s15
-; GFX9-NEXT:    s_mul_hi_u32 s14, s4, s12
-; GFX9-NEXT:    s_addc_u32 s5, s13, s16
-; GFX9-NEXT:    s_addc_u32 s13, s14, 0
-; GFX9-NEXT:    s_mul_i32 s12, s4, s12
-; GFX9-NEXT:    s_add_u32 s5, s5, s12
-; GFX9-NEXT:    s_addc_u32 s12, 0, s13
-; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s5, v1
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s4, s4, s12
-; GFX9-NEXT:    v_readfirstlane_b32 s12, v1
-; GFX9-NEXT:    s_mul_i32 s5, s0, s4
-; GFX9-NEXT:    s_mul_hi_u32 s13, s0, s12
-; GFX9-NEXT:    s_add_i32 s5, s13, s5
-; GFX9-NEXT:    s_mul_i32 s1, s1, s12
-; GFX9-NEXT:    s_add_i32 s5, s5, s1
-; GFX9-NEXT:    s_mul_i32 s0, s0, s12
-; GFX9-NEXT:    s_mul_hi_u32 s13, s4, s0
-; GFX9-NEXT:    s_mul_i32 s14, s4, s0
-; GFX9-NEXT:    s_mul_i32 s16, s12, s5
-; GFX9-NEXT:    s_mul_hi_u32 s0, s12, s0
-; GFX9-NEXT:    s_mul_hi_u32 s15, s12, s5
-; GFX9-NEXT:    s_add_u32 s0, s0, s16
-; GFX9-NEXT:    s_addc_u32 s12, 0, s15
-; GFX9-NEXT:    s_add_u32 s0, s0, s14
-; GFX9-NEXT:    s_mul_hi_u32 s1, s4, s5
-; GFX9-NEXT:    s_addc_u32 s0, s12, s13
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_mul_i32 s5, s4, s5
-; GFX9-NEXT:    s_add_u32 s0, s0, s5
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s0, v1
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s12, s4, s1
-; GFX9-NEXT:    s_ashr_i32 s4, s11, 31
-; GFX9-NEXT:    s_add_u32 s0, s10, s4
+; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s15
+; GFX9-NEXT:    s_mul_i32 s15, s12, s15
+; GFX9-NEXT:    s_add_u32 s14, s14, s15
+; GFX9-NEXT:    s_mul_hi_u32 s16, s12, s5
+; GFX9-NEXT:    s_addc_u32 s13, s13, s17
+; GFX9-NEXT:    s_addc_u32 s14, s16, 0
+; GFX9-NEXT:    s_mul_i32 s5, s12, s5
+; GFX9-NEXT:    s_add_u32 s5, s13, s5
+; GFX9-NEXT:    s_addc_u32 s13, 0, s14
+; GFX9-NEXT:    s_add_i32 s14, s4, s5
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s12, s12, s13
+; GFX9-NEXT:    s_mul_i32 s4, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s5, s10, s14
+; GFX9-NEXT:    s_add_i32 s4, s5, s4
+; GFX9-NEXT:    s_mul_i32 s11, s11, s14
+; GFX9-NEXT:    s_add_i32 s4, s4, s11
+; GFX9-NEXT:    s_mul_i32 s10, s10, s14
+; GFX9-NEXT:    s_mul_hi_u32 s11, s12, s10
+; GFX9-NEXT:    s_mul_i32 s13, s12, s10
+; GFX9-NEXT:    s_mul_i32 s16, s14, s4
+; GFX9-NEXT:    s_mul_hi_u32 s10, s14, s10
+; GFX9-NEXT:    s_mul_hi_u32 s15, s14, s4
+; GFX9-NEXT:    s_add_u32 s10, s10, s16
+; GFX9-NEXT:    s_addc_u32 s15, 0, s15
+; GFX9-NEXT:    s_add_u32 s10, s10, s13
+; GFX9-NEXT:    s_mul_hi_u32 s5, s12, s4
+; GFX9-NEXT:    s_addc_u32 s10, s15, s11
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
+; GFX9-NEXT:    s_mul_i32 s4, s12, s4
+; GFX9-NEXT:    s_add_u32 s4, s10, s4
+; GFX9-NEXT:    s_addc_u32 s10, 0, s5
+; GFX9-NEXT:    s_add_i32 s14, s14, s4
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s10, s12, s10
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
+; GFX9-NEXT:    s_add_u32 s2, s2, s4
 ; GFX9-NEXT:    s_mov_b32 s5, s4
-; GFX9-NEXT:    s_addc_u32 s1, s11, s4
-; GFX9-NEXT:    s_xor_b64 s[10:11], s[0:1], s[4:5]
-; GFX9-NEXT:    v_readfirstlane_b32 s13, v1
-; GFX9-NEXT:    s_mul_i32 s1, s10, s12
-; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s13
-; GFX9-NEXT:    s_mul_hi_u32 s0, s10, s12
-; GFX9-NEXT:    s_add_u32 s1, s14, s1
-; GFX9-NEXT:    s_addc_u32 s0, 0, s0
-; GFX9-NEXT:    s_mul_hi_u32 s15, s11, s13
-; GFX9-NEXT:    s_mul_i32 s13, s11, s13
-; GFX9-NEXT:    s_add_u32 s1, s1, s13
-; GFX9-NEXT:    s_mul_hi_u32 s14, s11, s12
-; GFX9-NEXT:    s_addc_u32 s0, s0, s15
-; GFX9-NEXT:    s_addc_u32 s1, s14, 0
-; GFX9-NEXT:    s_mul_i32 s12, s11, s12
-; GFX9-NEXT:    s_add_u32 s12, s0, s12
-; GFX9-NEXT:    s_addc_u32 s13, 0, s1
-; GFX9-NEXT:    s_mul_i32 s0, s6, s13
-; GFX9-NEXT:    s_mul_hi_u32 s1, s6, s12
-; GFX9-NEXT:    s_add_i32 s0, s1, s0
-; GFX9-NEXT:    s_mul_i32 s1, s7, s12
-; GFX9-NEXT:    s_add_i32 s14, s0, s1
-; GFX9-NEXT:    s_mul_i32 s1, s6, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
-; GFX9-NEXT:    s_sub_i32 s0, s11, s14
-; GFX9-NEXT:    v_sub_co_u32_e32 v1, vcc, s10, v1
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_subb_u32 s10, s0, s7
-; GFX9-NEXT:    v_subrev_co_u32_e64 v2, s[0:1], s6, v1
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s10, s10, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s10, s7
-; GFX9-NEXT:    s_cselect_b32 s15, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s6, v2
-; GFX9-NEXT:    s_cmp_eq_u32 s10, s7
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v3, s15
-; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[0:1]
-; GFX9-NEXT:    s_add_u32 s0, s12, 1
-; GFX9-NEXT:    s_addc_u32 s10, s13, 0
-; GFX9-NEXT:    s_add_u32 s1, s12, 2
-; GFX9-NEXT:    s_addc_u32 s15, s13, 0
-; GFX9-NEXT:    v_mov_b32_e32 v3, s0
-; GFX9-NEXT:    v_mov_b32_e32 v4, s1
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v3, v4, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v3, s10
-; GFX9-NEXT:    v_mov_b32_e32 v4, s15
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[0:1]
-; GFX9-NEXT:    s_subb_u32 s0, s11, s14
-; GFX9-NEXT:    s_cmp_ge_u32 s0, s7
-; GFX9-NEXT:    s_cselect_b32 s1, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s6, v1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, s7
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v4, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v4, s13
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v3, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v3, s12
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v2, vcc
-; GFX9-NEXT:    s_xor_b64 s[0:1], s[4:5], s[2:3]
-; GFX9-NEXT:    v_xor_b32_e32 v2, s0, v2
-; GFX9-NEXT:    v_xor_b32_e32 v3, s1, v1
-; GFX9-NEXT:    v_mov_b32_e32 v4, s1
-; GFX9-NEXT:    v_subrev_co_u32_e32 v1, vcc, s0, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v2, vcc, v3, v4, vcc
-; GFX9-NEXT:    global_store_dwordx2 v0, v[1:2], s[8:9]
+; GFX9-NEXT:    s_addc_u32 s3, s3, s4
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT:    s_mul_i32 s12, s2, s10
+; GFX9-NEXT:    s_mul_hi_u32 s13, s2, s14
+; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s10
+; GFX9-NEXT:    s_add_u32 s12, s13, s12
+; GFX9-NEXT:    s_addc_u32 s11, 0, s11
+; GFX9-NEXT:    s_mul_hi_u32 s15, s3, s14
+; GFX9-NEXT:    s_mul_i32 s14, s3, s14
+; GFX9-NEXT:    s_add_u32 s12, s12, s14
+; GFX9-NEXT:    s_mul_hi_u32 s13, s3, s10
+; GFX9-NEXT:    s_addc_u32 s11, s11, s15
+; GFX9-NEXT:    s_addc_u32 s12, s13, 0
+; GFX9-NEXT:    s_mul_i32 s10, s3, s10
+; GFX9-NEXT:    s_add_u32 s14, s11, s10
+; GFX9-NEXT:    s_addc_u32 s15, 0, s12
+; GFX9-NEXT:    s_mul_i32 s10, s8, s15
+; GFX9-NEXT:    s_mul_hi_u32 s11, s8, s14
+; GFX9-NEXT:    s_add_i32 s10, s11, s10
+; GFX9-NEXT:    s_mul_i32 s11, s9, s14
+; GFX9-NEXT:    s_add_i32 s16, s10, s11
+; GFX9-NEXT:    s_sub_i32 s12, s3, s16
+; GFX9-NEXT:    s_mul_i32 s10, s8, s14
+; GFX9-NEXT:    s_sub_i32 s2, s2, s10
+; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s17, s12, s9
+; GFX9-NEXT:    s_sub_i32 s18, s2, s8
+; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GFX9-NEXT:    s_subb_u32 s12, s17, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s12, s9
+; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s18, s8
+; GFX9-NEXT:    s_cselect_b32 s17, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s12, s9
+; GFX9-NEXT:    s_cselect_b32 s12, s17, s13
+; GFX9-NEXT:    s_add_u32 s13, s14, 1
+; GFX9-NEXT:    s_addc_u32 s17, s15, 0
+; GFX9-NEXT:    s_add_u32 s18, s14, 2
+; GFX9-NEXT:    s_addc_u32 s19, s15, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX9-NEXT:    s_cselect_b32 s12, s18, s13
+; GFX9-NEXT:    s_cselect_b32 s13, s19, s17
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s3, s3, s16
+; GFX9-NEXT:    s_cmp_ge_u32 s3, s9
+; GFX9-NEXT:    s_cselect_b32 s10, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s2, s8
+; GFX9-NEXT:    s_cselect_b32 s2, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s3, s9
+; GFX9-NEXT:    s_cselect_b32 s2, s2, s10
+; GFX9-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s3, s13, s15
+; GFX9-NEXT:    s_cselect_b32 s2, s12, s14
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT:    s_sub_u32 s2, s2, s4
+; GFX9-NEXT:    s_subb_u32 s3, s3, s5
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    global_store_dwordx2 v0, v[1:2], s[0:1]
 ; GFX9-NEXT:    s_endpgm
   %shl.y = shl i64 4096, %y
   %r = sdiv i64 %x, %shl.y
@@ -8276,276 +8301,343 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-LABEL: sdiv_v2i64_pow2_shl_denom:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0xd
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_lshl_b64 s[0:1], 0x1000, s12
-; GFX6-NEXT:    s_lshl_b64 s[14:15], 0x1000, s14
-; GFX6-NEXT:    s_ashr_i32 s12, s1, 31
-; GFX6-NEXT:    s_add_u32 s0, s0, s12
-; GFX6-NEXT:    s_mov_b32 s13, s12
-; GFX6-NEXT:    s_addc_u32 s1, s1, s12
-; GFX6-NEXT:    s_xor_b64 s[2:3], s[0:1], s[12:13]
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX6-NEXT:    s_sub_u32 s0, 0, s2
-; GFX6-NEXT:    s_subb_u32 s1, 0, s3
-; GFX6-NEXT:    s_ashr_i32 s16, s9, 31
+; GFX6-NEXT:    s_lshl_b64 s[6:7], 0x1000, s12
+; GFX6-NEXT:    s_lshl_b64 s[0:1], 0x1000, s14
+; GFX6-NEXT:    s_ashr_i32 s2, s7, 31
+; GFX6-NEXT:    s_add_u32 s6, s6, s2
+; GFX6-NEXT:    s_mov_b32 s3, s2
+; GFX6-NEXT:    s_addc_u32 s7, s7, s2
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GFX6-NEXT:    s_sub_u32 s14, 0, s6
+; GFX6-NEXT:    s_subb_u32 s15, 0, s7
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX6-NEXT:    s_mov_b32 s17, s16
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v5, s1, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s0, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GFX6-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GFX6-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s1, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s0, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    s_add_u32 s0, s8, s16
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    s_addc_u32 s1, s9, s16
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    s_xor_b64 s[8:9], s[0:1], s[16:17]
-; GFX6-NEXT:    v_mul_lo_u32 v2, s8, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s8, v0
-; GFX6-NEXT:    v_mul_hi_u32 v4, s8, v1
-; GFX6-NEXT:    v_mul_hi_u32 v5, s9, v1
-; GFX6-NEXT:    v_mul_lo_u32 v1, s9, v1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, s9, v0
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX6-NEXT:    v_mul_hi_u32 v2, s14, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v0
+; GFX6-NEXT:    s_mul_i32 s13, s14, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s19, v2
+; GFX6-NEXT:    s_mul_i32 s17, s15, s12
+; GFX6-NEXT:    s_mul_i32 s18, s14, s12
+; GFX6-NEXT:    s_add_i32 s13, s19, s13
+; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s18
+; GFX6-NEXT:    s_add_i32 s13, s13, s17
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s13
+; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v3
+; GFX6-NEXT:    s_mul_i32 s20, s12, s13
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s13
+; GFX6-NEXT:    s_add_u32 s17, s17, s20
+; GFX6-NEXT:    v_readfirstlane_b32 s20, v0
+; GFX6-NEXT:    s_mul_i32 s18, s16, s18
+; GFX6-NEXT:    s_addc_u32 s20, 0, s20
+; GFX6-NEXT:    v_readfirstlane_b32 s19, v4
+; GFX6-NEXT:    s_add_u32 s17, s17, s18
+; GFX6-NEXT:    s_addc_u32 s17, s20, s19
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v1
+; GFX6-NEXT:    s_addc_u32 s18, s18, 0
+; GFX6-NEXT:    s_mul_i32 s13, s16, s13
+; GFX6-NEXT:    s_add_u32 s13, s17, s13
+; GFX6-NEXT:    s_addc_u32 s17, 0, s18
+; GFX6-NEXT:    s_add_i32 s18, s12, s13
+; GFX6-NEXT:    v_mov_b32_e32 v0, s18
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s14, v0
+; GFX6-NEXT:    s_or_b32 s12, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_addc_u32 s16, s16, s17
+; GFX6-NEXT:    s_mul_i32 s12, s14, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
+; GFX6-NEXT:    s_add_i32 s12, s13, s12
+; GFX6-NEXT:    s_mul_i32 s15, s15, s18
+; GFX6-NEXT:    s_mul_i32 s13, s14, s18
+; GFX6-NEXT:    s_add_i32 s12, s12, s15
+; GFX6-NEXT:    v_mov_b32_e32 v2, s13
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mul_hi_u32 v3, s16, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s18, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s16, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s18, v0
+; GFX6-NEXT:    s_mul_i32 s15, s18, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s19, v2
+; GFX6-NEXT:    s_add_u32 s15, s19, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v0
+; GFX6-NEXT:    s_mul_i32 s13, s16, s13
+; GFX6-NEXT:    s_addc_u32 s17, 0, s17
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v3
+; GFX6-NEXT:    s_add_u32 s13, s15, s13
+; GFX6-NEXT:    s_addc_u32 s13, s17, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    s_addc_u32 s14, s14, 0
+; GFX6-NEXT:    s_mul_i32 s12, s16, s12
+; GFX6-NEXT:    s_add_u32 s12, s13, s12
+; GFX6-NEXT:    s_addc_u32 s14, 0, s14
+; GFX6-NEXT:    s_add_i32 s15, s18, s12
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_or_b32 s12, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_addc_u32 s14, s16, s14
+; GFX6-NEXT:    s_ashr_i32 s12, s9, 31
+; GFX6-NEXT:    s_add_u32 s8, s8, s12
+; GFX6-NEXT:    s_mov_b32 s13, s12
+; GFX6-NEXT:    s_addc_u32 s9, s9, s12
+; GFX6-NEXT:    s_xor_b64 s[8:9], s[8:9], s[12:13]
+; GFX6-NEXT:    v_mov_b32_e32 v0, s14
+; GFX6-NEXT:    v_mul_hi_u32 v1, s8, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, s15
+; GFX6-NEXT:    v_mul_hi_u32 v3, s8, v2
+; GFX6-NEXT:    s_mul_i32 s16, s8, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v1
+; GFX6-NEXT:    v_mul_hi_u32 v1, s9, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v3
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s9, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s3, v0
-; GFX6-NEXT:    v_mov_b32_e32 v5, s3
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s2, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s9, v2
-; GFX6-NEXT:    v_sub_i32_e32 v3, vcc, s8, v3
-; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GFX6-NEXT:    v_subrev_i32_e64 v5, s[0:1], s2, v3
-; GFX6-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v5
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GFX6-NEXT:    v_add_i32_e64 v5, s[0:1], 1, v0
-; GFX6-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GFX6-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v0
-; GFX6-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v4, v5, v7, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, v6, v8, s[0:1]
-; GFX6-NEXT:    s_xor_b64 s[0:1], s[16:17], s[12:13]
-; GFX6-NEXT:    s_ashr_i32 s8, s15, 31
-; GFX6-NEXT:    s_add_u32 s12, s14, s8
-; GFX6-NEXT:    v_mov_b32_e32 v6, s9
-; GFX6-NEXT:    s_mov_b32 s9, s8
-; GFX6-NEXT:    s_addc_u32 s13, s15, s8
-; GFX6-NEXT:    s_xor_b64 s[12:13], s[12:13], s[8:9]
-; GFX6-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v6, s12
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v7, s13
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s3, v2
-; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s2, v3
-; GFX6-NEXT:    v_mac_f32_e32 v6, 0x4f800000, v7
-; GFX6-NEXT:    v_rcp_f32_e32 v6, v6
-; GFX6-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v2
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v8, v3, vcc
-; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GFX6-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v6
-; GFX6-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
-; GFX6-NEXT:    v_trunc_f32_e32 v3, v3
-; GFX6-NEXT:    v_mac_f32_e32 v2, 0xcf800000, v3
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GFX6-NEXT:    s_sub_u32 s2, 0, s12
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; GFX6-NEXT:    v_mul_hi_u32 v4, s2, v2
-; GFX6-NEXT:    v_mul_lo_u32 v5, s2, v3
-; GFX6-NEXT:    s_subb_u32 s3, 0, s13
-; GFX6-NEXT:    v_mul_lo_u32 v6, s3, v2
-; GFX6-NEXT:    v_xor_b32_e32 v0, s0, v0
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GFX6-NEXT:    v_mul_lo_u32 v5, s2, v2
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
-; GFX6-NEXT:    v_mul_lo_u32 v6, v2, v4
-; GFX6-NEXT:    v_mul_hi_u32 v7, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v8, v2, v4
-; GFX6-NEXT:    v_mul_hi_u32 v9, v3, v4
-; GFX6-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v8, v3, v5
-; GFX6-NEXT:    v_mul_hi_u32 v5, v3, v5
-; GFX6-NEXT:    v_xor_b32_e32 v1, s1, v1
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v6, v8
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v9, vcc
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v3, v5, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, s2, v3
-; GFX6-NEXT:    v_mul_hi_u32 v5, s2, v2
-; GFX6-NEXT:    v_mul_lo_u32 v6, s3, v2
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
-; GFX6-NEXT:    v_mul_lo_u32 v5, s2, v2
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
-; GFX6-NEXT:    v_mul_lo_u32 v8, v2, v4
-; GFX6-NEXT:    v_mul_hi_u32 v9, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v10, v2, v4
-; GFX6-NEXT:    v_mul_hi_u32 v7, v3, v5
-; GFX6-NEXT:    v_mul_lo_u32 v5, v3, v5
-; GFX6-NEXT:    v_mul_hi_u32 v6, v3, v4
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
-; GFX6-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v9, v7, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GFX6-NEXT:    s_ashr_i32 s2, s11, 31
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
-; GFX6-NEXT:    s_add_u32 s10, s10, s2
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    s_mov_b32 s3, s2
-; GFX6-NEXT:    s_addc_u32 s11, s11, s2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v3, v5, vcc
-; GFX6-NEXT:    s_xor_b64 s[10:11], s[10:11], s[2:3]
-; GFX6-NEXT:    v_mul_lo_u32 v4, s10, v3
-; GFX6-NEXT:    v_mul_hi_u32 v5, s10, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, s10, v3
-; GFX6-NEXT:    v_mul_hi_u32 v8, s11, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s11, v3
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v7, s11, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s11, v2
-; GFX6-NEXT:    v_mov_b32_e32 v6, s1
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v2, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, s12, v3
-; GFX6-NEXT:    v_mul_hi_u32 v5, s12, v2
-; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
-; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v6, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v6, s13, v2
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
-; GFX6-NEXT:    v_mul_lo_u32 v5, s12, v2
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
-; GFX6-NEXT:    v_sub_i32_e32 v6, vcc, s11, v4
-; GFX6-NEXT:    v_mov_b32_e32 v7, s13
-; GFX6-NEXT:    v_sub_i32_e32 v5, vcc, s10, v5
-; GFX6-NEXT:    v_subb_u32_e64 v6, s[0:1], v6, v7, vcc
-; GFX6-NEXT:    v_subrev_i32_e64 v7, s[0:1], s12, v5
-; GFX6-NEXT:    v_subbrev_u32_e64 v6, s[0:1], 0, v6, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s13, v6
-; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[0:1], s12, v7
-; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[0:1], s13, v6
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, v8, v7, s[0:1]
-; GFX6-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v2
-; GFX6-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v3, s[0:1]
-; GFX6-NEXT:    v_add_i32_e64 v9, s[0:1], 2, v2
-; GFX6-NEXT:    v_addc_u32_e64 v10, s[0:1], 0, v3, s[0:1]
-; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, v7, v9, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v7, v8, v10, s[0:1]
-; GFX6-NEXT:    v_mov_b32_e32 v8, s11
-; GFX6-NEXT:    v_subb_u32_e32 v4, vcc, v8, v4, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s13, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s12, v5
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v4
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, v8, v5, vcc
-; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
-; GFX6-NEXT:    s_xor_b64 s[0:1], s[2:3], s[8:9]
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v7, vcc
-; GFX6-NEXT:    v_xor_b32_e32 v2, s0, v2
-; GFX6-NEXT:    v_xor_b32_e32 v3, s1, v3
-; GFX6-NEXT:    v_mov_b32_e32 v4, s1
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s0, v2
-; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
-; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; GFX6-NEXT:    s_add_u32 s16, s18, s16
+; GFX6-NEXT:    s_addc_u32 s17, 0, s17
+; GFX6-NEXT:    s_mul_i32 s15, s9, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v1
+; GFX6-NEXT:    s_add_u32 s15, s16, s15
+; GFX6-NEXT:    s_addc_u32 s15, s17, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
+; GFX6-NEXT:    s_addc_u32 s16, s16, 0
+; GFX6-NEXT:    s_mul_i32 s14, s9, s14
+; GFX6-NEXT:    s_add_u32 s17, s15, s14
+; GFX6-NEXT:    v_mov_b32_e32 v0, s17
+; GFX6-NEXT:    v_mul_hi_u32 v0, s6, v0
+; GFX6-NEXT:    s_addc_u32 s16, 0, s16
+; GFX6-NEXT:    s_mul_i32 s14, s6, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
+; GFX6-NEXT:    s_add_i32 s14, s15, s14
+; GFX6-NEXT:    s_mul_i32 s15, s7, s17
+; GFX6-NEXT:    s_add_i32 s18, s14, s15
+; GFX6-NEXT:    s_sub_i32 s19, s9, s18
+; GFX6-NEXT:    s_mul_i32 s14, s6, s17
+; GFX6-NEXT:    s_sub_i32 s8, s8, s14
+; GFX6-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GFX6-NEXT:    s_or_b32 s20, s14, s15
+; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
+; GFX6-NEXT:    s_subb_u32 s19, s19, s7
+; GFX6-NEXT:    s_sub_i32 s21, s8, s6
+; GFX6-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GFX6-NEXT:    s_or_b32 s14, s14, s15
+; GFX6-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX6-NEXT:    s_subb_u32 s14, s19, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s14, s7
+; GFX6-NEXT:    s_cselect_b32 s15, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s21, s6
+; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s14, s7
+; GFX6-NEXT:    s_cselect_b32 s14, s19, s15
+; GFX6-NEXT:    s_add_u32 s15, s17, 1
+; GFX6-NEXT:    s_addc_u32 s19, s16, 0
+; GFX6-NEXT:    s_add_u32 s21, s17, 2
+; GFX6-NEXT:    s_addc_u32 s22, s16, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX6-NEXT:    s_cselect_b32 s14, s21, s15
+; GFX6-NEXT:    s_cselect_b32 s15, s22, s19
+; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
+; GFX6-NEXT:    s_subb_u32 s9, s9, s18
+; GFX6-NEXT:    s_cmp_ge_u32 s9, s7
+; GFX6-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s8, s6
+; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s9, s7
+; GFX6-NEXT:    s_cselect_b32 s6, s6, s18
+; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX6-NEXT:    s_cselect_b32 s7, s15, s16
+; GFX6-NEXT:    s_cselect_b32 s6, s14, s17
+; GFX6-NEXT:    s_xor_b64 s[2:3], s[12:13], s[2:3]
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
+; GFX6-NEXT:    s_sub_u32 s14, s6, s2
+; GFX6-NEXT:    s_subb_u32 s15, s7, s3
+; GFX6-NEXT:    s_ashr_i32 s6, s1, 31
+; GFX6-NEXT:    s_add_u32 s0, s0, s6
+; GFX6-NEXT:    s_mov_b32 s7, s6
+; GFX6-NEXT:    s_addc_u32 s1, s1, s6
+; GFX6-NEXT:    s_xor_b64 s[8:9], s[0:1], s[6:7]
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
+; GFX6-NEXT:    s_sub_u32 s12, 0, s8
+; GFX6-NEXT:    s_subb_u32 s13, 0, s9
+; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
+; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
+; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX6-NEXT:    s_mul_i32 s1, s12, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s3, v2
+; GFX6-NEXT:    s_mul_i32 s0, s13, s2
+; GFX6-NEXT:    s_add_i32 s1, s3, s1
+; GFX6-NEXT:    s_add_i32 s3, s1, s0
+; GFX6-NEXT:    s_mul_i32 s17, s12, s2
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s3
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s17
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GFX6-NEXT:    s_mul_i32 s4, s2, s3
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, v1, s17
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s3
+; GFX6-NEXT:    s_add_u32 s4, s18, s4
+; GFX6-NEXT:    s_addc_u32 s5, 0, s5
+; GFX6-NEXT:    s_mul_i32 s17, s16, s17
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v0
+; GFX6-NEXT:    s_add_u32 s4, s4, s17
+; GFX6-NEXT:    s_addc_u32 s4, s5, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX6-NEXT:    s_addc_u32 s5, s5, 0
+; GFX6-NEXT:    s_mul_i32 s3, s16, s3
+; GFX6-NEXT:    s_add_u32 s3, s4, s3
+; GFX6-NEXT:    s_addc_u32 s4, 0, s5
+; GFX6-NEXT:    s_add_i32 s5, s2, s3
+; GFX6-NEXT:    v_mov_b32_e32 v0, s5
+; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX6-NEXT:    s_addc_u32 s4, s16, s4
+; GFX6-NEXT:    s_mul_i32 s2, s12, s4
+; GFX6-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX6-NEXT:    s_add_i32 s2, s3, s2
+; GFX6-NEXT:    s_mul_i32 s13, s13, s5
+; GFX6-NEXT:    s_mul_i32 s3, s12, s5
+; GFX6-NEXT:    s_add_i32 s2, s2, s13
+; GFX6-NEXT:    v_mov_b32_e32 v2, s3
+; GFX6-NEXT:    v_mov_b32_e32 v0, s2
+; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s5, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s4, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
+; GFX6-NEXT:    s_mul_i32 s13, s5, s2
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
+; GFX6-NEXT:    s_add_u32 s13, s17, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
+; GFX6-NEXT:    s_mul_i32 s3, s4, s3
+; GFX6-NEXT:    s_addc_u32 s16, 0, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
+; GFX6-NEXT:    s_add_u32 s3, s13, s3
+; GFX6-NEXT:    s_addc_u32 s3, s16, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    s_addc_u32 s12, s12, 0
+; GFX6-NEXT:    s_mul_i32 s2, s4, s2
+; GFX6-NEXT:    s_add_u32 s2, s3, s2
+; GFX6-NEXT:    s_addc_u32 s12, 0, s12
+; GFX6-NEXT:    s_add_i32 s13, s5, s2
+; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX6-NEXT:    s_addc_u32 s12, s4, s12
+; GFX6-NEXT:    s_ashr_i32 s4, s11, 31
+; GFX6-NEXT:    s_add_u32 s2, s10, s4
+; GFX6-NEXT:    s_mov_b32 s5, s4
+; GFX6-NEXT:    s_addc_u32 s3, s11, s4
+; GFX6-NEXT:    s_xor_b64 s[10:11], s[2:3], s[4:5]
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mul_hi_u32 v1, s10, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, s13
+; GFX6-NEXT:    v_mul_hi_u32 v3, s10, v2
+; GFX6-NEXT:    s_mul_i32 s2, s10, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    v_mul_hi_u32 v1, s11, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v3
+; GFX6-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GFX6-NEXT:    s_add_u32 s2, s17, s2
+; GFX6-NEXT:    s_addc_u32 s16, 0, s16
+; GFX6-NEXT:    s_mul_i32 s13, s11, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v1
+; GFX6-NEXT:    s_add_u32 s2, s2, s13
+; GFX6-NEXT:    s_addc_u32 s2, s16, s17
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
+; GFX6-NEXT:    s_addc_u32 s13, s13, 0
+; GFX6-NEXT:    s_mul_i32 s12, s11, s12
+; GFX6-NEXT:    s_add_u32 s16, s2, s12
+; GFX6-NEXT:    v_mov_b32_e32 v0, s16
+; GFX6-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GFX6-NEXT:    s_addc_u32 s17, 0, s13
+; GFX6-NEXT:    s_mul_i32 s12, s8, s17
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
+; GFX6-NEXT:    s_add_i32 s12, s13, s12
+; GFX6-NEXT:    s_mul_i32 s13, s9, s16
+; GFX6-NEXT:    s_add_i32 s18, s12, s13
+; GFX6-NEXT:    s_sub_i32 s19, s11, s18
+; GFX6-NEXT:    s_mul_i32 s12, s8, s16
+; GFX6-NEXT:    s_sub_i32 s10, s10, s12
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_or_b32 s20, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
+; GFX6-NEXT:    s_subb_u32 s19, s19, s9
+; GFX6-NEXT:    s_sub_i32 s21, s10, s8
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_or_b32 s12, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_subb_u32 s12, s19, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s12, s9
+; GFX6-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s21, s8
+; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s12, s9
+; GFX6-NEXT:    s_cselect_b32 s12, s19, s13
+; GFX6-NEXT:    s_add_u32 s13, s16, 1
+; GFX6-NEXT:    s_addc_u32 s19, s17, 0
+; GFX6-NEXT:    s_add_u32 s21, s16, 2
+; GFX6-NEXT:    s_addc_u32 s22, s17, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_cselect_b32 s12, s21, s13
+; GFX6-NEXT:    s_cselect_b32 s13, s22, s19
+; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
+; GFX6-NEXT:    s_subb_u32 s11, s11, s18
+; GFX6-NEXT:    s_cmp_ge_u32 s11, s9
+; GFX6-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s10, s8
+; GFX6-NEXT:    s_cselect_b32 s8, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s11, s9
+; GFX6-NEXT:    s_cselect_b32 s8, s8, s18
+; GFX6-NEXT:    s_cmp_lg_u32 s8, 0
+; GFX6-NEXT:    s_cselect_b32 s9, s13, s17
+; GFX6-NEXT:    s_cselect_b32 s8, s12, s16
+; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[8:9], s[4:5]
+; GFX6-NEXT:    s_sub_u32 s4, s6, s4
+; GFX6-NEXT:    s_subb_u32 s5, s7, s5
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s14
+; GFX6-NEXT:    v_mov_b32_e32 v1, s15
+; GFX6-NEXT:    v_mov_b32_e32 v2, s4
+; GFX6-NEXT:    v_mov_b32_e32 v3, s5
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: sdiv_v2i64_pow2_shl_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], 0x1000, s12
-; GFX9-NEXT:    s_lshl_b64 s[6:7], 0x1000, s14
-; GFX9-NEXT:    s_ashr_i32 s12, s1, 31
-; GFX9-NEXT:    s_add_u32 s0, s0, s12
-; GFX9-NEXT:    s_mov_b32 s13, s12
-; GFX9-NEXT:    s_addc_u32 s1, s1, s12
-; GFX9-NEXT:    s_xor_b64 s[14:15], s[0:1], s[12:13]
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s14
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s15
-; GFX9-NEXT:    s_sub_u32 s0, 0, s14
-; GFX9-NEXT:    s_subb_u32 s1, 0, s15
+; GFX9-NEXT:    s_lshl_b64 s[6:7], 0x1000, s12
+; GFX9-NEXT:    s_lshl_b64 s[0:1], 0x1000, s14
+; GFX9-NEXT:    s_ashr_i32 s2, s7, 31
+; GFX9-NEXT:    s_add_u32 s6, s6, s2
+; GFX9-NEXT:    s_mov_b32 s3, s2
+; GFX9-NEXT:    s_addc_u32 s7, s7, s2
+; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GFX9-NEXT:    s_sub_u32 s14, 0, s6
+; GFX9-NEXT:    s_subb_u32 s15, 0, s7
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -8554,270 +8646,255 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX9-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s12, v0
+; GFX9-NEXT:    s_mul_i32 s13, s14, s16
+; GFX9-NEXT:    s_mul_hi_u32 s18, s14, s12
+; GFX9-NEXT:    s_mul_i32 s17, s15, s12
+; GFX9-NEXT:    s_add_i32 s13, s18, s13
+; GFX9-NEXT:    s_mul_i32 s19, s14, s12
+; GFX9-NEXT:    s_add_i32 s13, s13, s17
+; GFX9-NEXT:    s_mul_hi_u32 s18, s12, s19
+; GFX9-NEXT:    s_mul_i32 s20, s12, s13
+; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s13
+; GFX9-NEXT:    s_add_u32 s18, s18, s20
+; GFX9-NEXT:    s_addc_u32 s17, 0, s17
+; GFX9-NEXT:    s_mul_hi_u32 s20, s16, s19
+; GFX9-NEXT:    s_mul_i32 s19, s16, s19
+; GFX9-NEXT:    s_add_u32 s18, s18, s19
+; GFX9-NEXT:    s_mul_hi_u32 s21, s16, s13
+; GFX9-NEXT:    s_addc_u32 s17, s17, s20
+; GFX9-NEXT:    s_addc_u32 s18, s21, 0
+; GFX9-NEXT:    s_mul_i32 s13, s16, s13
+; GFX9-NEXT:    s_add_u32 s13, s17, s13
+; GFX9-NEXT:    s_addc_u32 s17, 0, s18
+; GFX9-NEXT:    s_add_i32 s18, s12, s13
+; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GFX9-NEXT:    s_addc_u32 s16, s16, s17
+; GFX9-NEXT:    s_mul_i32 s12, s14, s16
+; GFX9-NEXT:    s_mul_hi_u32 s13, s14, s18
+; GFX9-NEXT:    s_add_i32 s12, s13, s12
+; GFX9-NEXT:    s_mul_i32 s15, s15, s18
+; GFX9-NEXT:    s_add_i32 s12, s12, s15
+; GFX9-NEXT:    s_mul_i32 s14, s14, s18
+; GFX9-NEXT:    s_mul_hi_u32 s15, s16, s14
+; GFX9-NEXT:    s_mul_i32 s17, s16, s14
+; GFX9-NEXT:    s_mul_i32 s20, s18, s12
+; GFX9-NEXT:    s_mul_hi_u32 s14, s18, s14
+; GFX9-NEXT:    s_mul_hi_u32 s19, s18, s12
+; GFX9-NEXT:    s_add_u32 s14, s14, s20
+; GFX9-NEXT:    s_addc_u32 s19, 0, s19
+; GFX9-NEXT:    s_add_u32 s14, s14, s17
+; GFX9-NEXT:    s_mul_hi_u32 s13, s16, s12
+; GFX9-NEXT:    s_addc_u32 s14, s19, s15
+; GFX9-NEXT:    s_addc_u32 s13, s13, 0
+; GFX9-NEXT:    s_mul_i32 s12, s16, s12
+; GFX9-NEXT:    s_add_u32 s12, s14, s12
+; GFX9-NEXT:    s_addc_u32 s14, 0, s13
+; GFX9-NEXT:    s_add_i32 s18, s18, s12
+; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GFX9-NEXT:    s_addc_u32 s14, s16, s14
+; GFX9-NEXT:    s_ashr_i32 s12, s9, 31
+; GFX9-NEXT:    s_add_u32 s8, s8, s12
+; GFX9-NEXT:    s_mov_b32 s13, s12
+; GFX9-NEXT:    s_addc_u32 s9, s9, s12
+; GFX9-NEXT:    s_xor_b64 s[8:9], s[8:9], s[12:13]
+; GFX9-NEXT:    s_mul_i32 s16, s8, s14
+; GFX9-NEXT:    s_mul_hi_u32 s17, s8, s18
+; GFX9-NEXT:    s_mul_hi_u32 s15, s8, s14
+; GFX9-NEXT:    s_add_u32 s16, s17, s16
+; GFX9-NEXT:    s_addc_u32 s15, 0, s15
+; GFX9-NEXT:    s_mul_hi_u32 s19, s9, s18
+; GFX9-NEXT:    s_mul_i32 s18, s9, s18
+; GFX9-NEXT:    s_add_u32 s16, s16, s18
+; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s14
+; GFX9-NEXT:    s_addc_u32 s15, s15, s19
+; GFX9-NEXT:    s_addc_u32 s16, s17, 0
+; GFX9-NEXT:    s_mul_i32 s14, s9, s14
+; GFX9-NEXT:    s_add_u32 s18, s15, s14
+; GFX9-NEXT:    s_addc_u32 s19, 0, s16
+; GFX9-NEXT:    s_mul_i32 s14, s6, s19
+; GFX9-NEXT:    s_mul_hi_u32 s15, s6, s18
+; GFX9-NEXT:    s_add_i32 s14, s15, s14
+; GFX9-NEXT:    s_mul_i32 s15, s7, s18
+; GFX9-NEXT:    s_add_i32 s20, s14, s15
+; GFX9-NEXT:    s_sub_i32 s16, s9, s20
+; GFX9-NEXT:    s_mul_i32 s14, s6, s18
+; GFX9-NEXT:    s_sub_i32 s8, s8, s14
+; GFX9-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GFX9-NEXT:    s_subb_u32 s21, s16, s7
+; GFX9-NEXT:    s_sub_i32 s22, s8, s6
+; GFX9-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GFX9-NEXT:    s_subb_u32 s16, s21, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s16, s7
+; GFX9-NEXT:    s_cselect_b32 s17, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s22, s6
+; GFX9-NEXT:    s_cselect_b32 s21, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s16, s7
+; GFX9-NEXT:    s_cselect_b32 s16, s21, s17
+; GFX9-NEXT:    s_add_u32 s17, s18, 1
+; GFX9-NEXT:    s_addc_u32 s21, s19, 0
+; GFX9-NEXT:    s_add_u32 s22, s18, 2
+; GFX9-NEXT:    s_addc_u32 s23, s19, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s16, 0
+; GFX9-NEXT:    s_cselect_b32 s16, s22, s17
+; GFX9-NEXT:    s_cselect_b32 s17, s23, s21
+; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GFX9-NEXT:    s_subb_u32 s9, s9, s20
+; GFX9-NEXT:    s_cmp_ge_u32 s9, s7
+; GFX9-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s8, s6
+; GFX9-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s9, s7
+; GFX9-NEXT:    s_cselect_b32 s6, s6, s14
+; GFX9-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX9-NEXT:    s_cselect_b32 s7, s17, s19
+; GFX9-NEXT:    s_cselect_b32 s6, s16, s18
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[12:13], s[2:3]
+; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
+; GFX9-NEXT:    s_sub_u32 s14, s6, s2
+; GFX9-NEXT:    s_subb_u32 s15, s7, s3
+; GFX9-NEXT:    s_ashr_i32 s2, s1, 31
+; GFX9-NEXT:    s_add_u32 s0, s0, s2
+; GFX9-NEXT:    s_mov_b32 s3, s2
+; GFX9-NEXT:    s_addc_u32 s1, s1, s2
+; GFX9-NEXT:    s_xor_b64 s[6:7], s[0:1], s[2:3]
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT:    s_sub_u32 s8, 0, s6
+; GFX9-NEXT:    s_subb_u32 s9, 0, s7
+; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
+; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x5f7ffffc, v1
+; GFX9-NEXT:    v_mul_f32_e32 v2, 0x2f800000, v1
+; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
+; GFX9-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v2
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s5, v0
-; GFX9-NEXT:    s_mul_i32 s16, s0, s4
-; GFX9-NEXT:    s_mul_hi_u32 s18, s0, s5
-; GFX9-NEXT:    s_mul_i32 s17, s1, s5
-; GFX9-NEXT:    s_add_i32 s16, s18, s16
-; GFX9-NEXT:    s_mul_i32 s19, s0, s5
-; GFX9-NEXT:    s_add_i32 s16, s16, s17
-; GFX9-NEXT:    s_mul_hi_u32 s17, s5, s16
-; GFX9-NEXT:    s_mul_i32 s18, s5, s16
-; GFX9-NEXT:    s_mul_hi_u32 s5, s5, s19
-; GFX9-NEXT:    s_add_u32 s5, s5, s18
+; GFX9-NEXT:    v_readfirstlane_b32 s13, v2
+; GFX9-NEXT:    s_mul_hi_u32 s12, s8, s4
+; GFX9-NEXT:    s_mul_i32 s16, s8, s13
+; GFX9-NEXT:    s_mul_i32 s5, s9, s4
+; GFX9-NEXT:    s_add_i32 s12, s12, s16
+; GFX9-NEXT:    s_add_i32 s12, s12, s5
+; GFX9-NEXT:    s_mul_i32 s17, s8, s4
+; GFX9-NEXT:    s_mul_i32 s16, s4, s12
+; GFX9-NEXT:    s_mul_hi_u32 s18, s4, s17
+; GFX9-NEXT:    s_mul_hi_u32 s5, s4, s12
+; GFX9-NEXT:    s_add_u32 s16, s18, s16
+; GFX9-NEXT:    s_addc_u32 s5, 0, s5
+; GFX9-NEXT:    s_mul_hi_u32 s19, s13, s17
+; GFX9-NEXT:    s_mul_i32 s17, s13, s17
+; GFX9-NEXT:    s_add_u32 s16, s16, s17
+; GFX9-NEXT:    s_mul_hi_u32 s18, s13, s12
+; GFX9-NEXT:    s_addc_u32 s5, s5, s19
+; GFX9-NEXT:    s_addc_u32 s16, s18, 0
+; GFX9-NEXT:    s_mul_i32 s12, s13, s12
+; GFX9-NEXT:    s_add_u32 s5, s5, s12
+; GFX9-NEXT:    s_addc_u32 s12, 0, s16
+; GFX9-NEXT:    s_add_i32 s16, s4, s5
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s12, s13, s12
+; GFX9-NEXT:    s_mul_i32 s4, s8, s12
+; GFX9-NEXT:    s_mul_hi_u32 s5, s8, s16
+; GFX9-NEXT:    s_add_i32 s4, s5, s4
+; GFX9-NEXT:    s_mul_i32 s9, s9, s16
+; GFX9-NEXT:    s_add_i32 s4, s4, s9
+; GFX9-NEXT:    s_mul_i32 s8, s8, s16
+; GFX9-NEXT:    s_mul_hi_u32 s9, s12, s8
+; GFX9-NEXT:    s_mul_i32 s13, s12, s8
+; GFX9-NEXT:    s_mul_i32 s18, s16, s4
+; GFX9-NEXT:    s_mul_hi_u32 s8, s16, s8
+; GFX9-NEXT:    s_mul_hi_u32 s17, s16, s4
+; GFX9-NEXT:    s_add_u32 s8, s8, s18
 ; GFX9-NEXT:    s_addc_u32 s17, 0, s17
-; GFX9-NEXT:    s_mul_hi_u32 s20, s4, s19
-; GFX9-NEXT:    s_mul_i32 s19, s4, s19
-; GFX9-NEXT:    s_add_u32 s5, s5, s19
-; GFX9-NEXT:    s_mul_hi_u32 s18, s4, s16
-; GFX9-NEXT:    s_addc_u32 s5, s17, s20
-; GFX9-NEXT:    s_addc_u32 s17, s18, 0
-; GFX9-NEXT:    s_mul_i32 s16, s4, s16
-; GFX9-NEXT:    s_add_u32 s5, s5, s16
-; GFX9-NEXT:    s_addc_u32 s16, 0, s17
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s5, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s4, s4, s16
-; GFX9-NEXT:    v_readfirstlane_b32 s16, v0
-; GFX9-NEXT:    s_mul_i32 s5, s0, s4
-; GFX9-NEXT:    s_mul_hi_u32 s17, s0, s16
-; GFX9-NEXT:    s_add_i32 s5, s17, s5
-; GFX9-NEXT:    s_mul_i32 s1, s1, s16
-; GFX9-NEXT:    s_add_i32 s5, s5, s1
-; GFX9-NEXT:    s_mul_i32 s0, s0, s16
-; GFX9-NEXT:    s_mul_hi_u32 s17, s4, s0
-; GFX9-NEXT:    s_mul_i32 s18, s4, s0
-; GFX9-NEXT:    s_mul_i32 s20, s16, s5
-; GFX9-NEXT:    s_mul_hi_u32 s0, s16, s0
-; GFX9-NEXT:    s_mul_hi_u32 s19, s16, s5
-; GFX9-NEXT:    s_add_u32 s0, s0, s20
-; GFX9-NEXT:    s_addc_u32 s16, 0, s19
-; GFX9-NEXT:    s_add_u32 s0, s0, s18
-; GFX9-NEXT:    s_mul_hi_u32 s1, s4, s5
-; GFX9-NEXT:    s_addc_u32 s0, s16, s17
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_mul_i32 s5, s4, s5
-; GFX9-NEXT:    s_add_u32 s0, s0, s5
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s16, s4, s1
-; GFX9-NEXT:    s_ashr_i32 s4, s9, 31
-; GFX9-NEXT:    s_add_u32 s0, s8, s4
+; GFX9-NEXT:    s_add_u32 s8, s8, s13
+; GFX9-NEXT:    s_mul_hi_u32 s5, s12, s4
+; GFX9-NEXT:    s_addc_u32 s8, s17, s9
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
+; GFX9-NEXT:    s_mul_i32 s4, s12, s4
+; GFX9-NEXT:    s_add_u32 s4, s8, s4
+; GFX9-NEXT:    s_addc_u32 s8, 0, s5
+; GFX9-NEXT:    s_add_i32 s16, s16, s4
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s12, s12, s8
+; GFX9-NEXT:    s_ashr_i32 s4, s11, 31
+; GFX9-NEXT:    s_add_u32 s8, s10, s4
 ; GFX9-NEXT:    s_mov_b32 s5, s4
-; GFX9-NEXT:    s_addc_u32 s1, s9, s4
-; GFX9-NEXT:    s_xor_b64 s[8:9], s[0:1], s[4:5]
-; GFX9-NEXT:    v_readfirstlane_b32 s17, v0
-; GFX9-NEXT:    s_mul_i32 s1, s8, s16
-; GFX9-NEXT:    s_mul_hi_u32 s18, s8, s17
-; GFX9-NEXT:    s_mul_hi_u32 s0, s8, s16
-; GFX9-NEXT:    s_add_u32 s1, s18, s1
-; GFX9-NEXT:    s_addc_u32 s0, 0, s0
-; GFX9-NEXT:    s_mul_hi_u32 s19, s9, s17
-; GFX9-NEXT:    s_mul_i32 s17, s9, s17
-; GFX9-NEXT:    s_add_u32 s1, s1, s17
-; GFX9-NEXT:    s_mul_hi_u32 s18, s9, s16
-; GFX9-NEXT:    s_addc_u32 s0, s0, s19
-; GFX9-NEXT:    s_addc_u32 s1, s18, 0
+; GFX9-NEXT:    s_addc_u32 s9, s11, s4
+; GFX9-NEXT:    s_xor_b64 s[8:9], s[8:9], s[4:5]
+; GFX9-NEXT:    s_mul_i32 s11, s8, s12
+; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s16
+; GFX9-NEXT:    s_mul_hi_u32 s10, s8, s12
+; GFX9-NEXT:    s_add_u32 s11, s13, s11
+; GFX9-NEXT:    s_addc_u32 s10, 0, s10
+; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s16
 ; GFX9-NEXT:    s_mul_i32 s16, s9, s16
-; GFX9-NEXT:    s_add_u32 s16, s0, s16
-; GFX9-NEXT:    s_addc_u32 s17, 0, s1
-; GFX9-NEXT:    s_mul_i32 s0, s14, s17
-; GFX9-NEXT:    s_mul_hi_u32 s1, s14, s16
-; GFX9-NEXT:    s_add_i32 s0, s1, s0
-; GFX9-NEXT:    s_mul_i32 s1, s15, s16
-; GFX9-NEXT:    s_add_i32 s18, s0, s1
-; GFX9-NEXT:    s_mul_i32 s1, s14, s16
-; GFX9-NEXT:    v_mov_b32_e32 v0, s1
-; GFX9-NEXT:    s_sub_i32 s0, s9, s18
-; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s8, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_subb_u32 s8, s0, s15
-; GFX9-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s14, v0
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s8, s8, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s8, s15
+; GFX9-NEXT:    s_add_u32 s11, s11, s16
+; GFX9-NEXT:    s_mul_hi_u32 s13, s9, s12
+; GFX9-NEXT:    s_addc_u32 s10, s10, s17
+; GFX9-NEXT:    s_addc_u32 s11, s13, 0
+; GFX9-NEXT:    s_mul_i32 s12, s9, s12
+; GFX9-NEXT:    s_add_u32 s16, s10, s12
+; GFX9-NEXT:    s_addc_u32 s17, 0, s11
+; GFX9-NEXT:    s_mul_i32 s10, s6, s17
+; GFX9-NEXT:    s_mul_hi_u32 s11, s6, s16
+; GFX9-NEXT:    s_add_i32 s10, s11, s10
+; GFX9-NEXT:    s_mul_i32 s11, s7, s16
+; GFX9-NEXT:    s_add_i32 s18, s10, s11
+; GFX9-NEXT:    s_sub_i32 s12, s9, s18
+; GFX9-NEXT:    s_mul_i32 s10, s6, s16
+; GFX9-NEXT:    s_sub_i32 s8, s8, s10
+; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s19, s12, s7
+; GFX9-NEXT:    s_sub_i32 s20, s8, s6
+; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GFX9-NEXT:    s_subb_u32 s12, s19, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s12, s7
+; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s20, s6
 ; GFX9-NEXT:    s_cselect_b32 s19, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s14, v1
-; GFX9-NEXT:    s_cmp_eq_u32 s8, s15
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s19
-; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v2, v1, s[0:1]
-; GFX9-NEXT:    s_add_u32 s0, s16, 1
-; GFX9-NEXT:    s_addc_u32 s8, s17, 0
-; GFX9-NEXT:    s_add_u32 s1, s16, 2
+; GFX9-NEXT:    s_cmp_eq_u32 s12, s7
+; GFX9-NEXT:    s_cselect_b32 s12, s19, s13
+; GFX9-NEXT:    s_add_u32 s13, s16, 1
 ; GFX9-NEXT:    s_addc_u32 s19, s17, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v2, v3, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s8
-; GFX9-NEXT:    v_mov_b32_e32 v3, s19
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GFX9-NEXT:    s_subb_u32 s0, s9, s18
-; GFX9-NEXT:    s_cmp_ge_u32 s0, s15
-; GFX9-NEXT:    s_cselect_b32 s1, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s14, v0
-; GFX9-NEXT:    s_cmp_eq_u32 s0, s15
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    s_xor_b64 s[0:1], s[4:5], s[12:13]
-; GFX9-NEXT:    s_ashr_i32 s4, s7, 31
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GFX9-NEXT:    s_add_u32 s6, s6, s4
-; GFX9-NEXT:    v_mov_b32_e32 v3, s17
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-NEXT:    s_mov_b32 s5, s4
-; GFX9-NEXT:    s_addc_u32 s7, s7, s4
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v2, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v2, s16
-; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v2, s6
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s7
-; GFX9-NEXT:    v_xor_b32_e32 v1, s0, v1
-; GFX9-NEXT:    v_xor_b32_e32 v5, s1, v0
-; GFX9-NEXT:    v_subrev_co_u32_e32 v0, vcc, s0, v1
-; GFX9-NEXT:    v_mac_f32_e32 v2, 0x4f800000, v3
-; GFX9-NEXT:    v_rcp_f32_e32 v2, v2
-; GFX9-NEXT:    s_sub_u32 s0, 0, s6
-; GFX9-NEXT:    v_mov_b32_e32 v6, s1
-; GFX9-NEXT:    s_subb_u32 s1, 0, s7
-; GFX9-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v2
-; GFX9-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
-; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
-; GFX9-NEXT:    v_mac_f32_e32 v2, 0xcf800000, v3
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v5, v6, vcc
-; GFX9-NEXT:    v_readfirstlane_b32 s8, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s13, v3
-; GFX9-NEXT:    s_mul_hi_u32 s12, s0, s8
-; GFX9-NEXT:    s_mul_i32 s14, s0, s13
-; GFX9-NEXT:    s_mul_i32 s9, s1, s8
-; GFX9-NEXT:    s_add_i32 s12, s12, s14
-; GFX9-NEXT:    s_add_i32 s12, s12, s9
-; GFX9-NEXT:    s_mul_i32 s15, s0, s8
-; GFX9-NEXT:    s_mul_hi_u32 s9, s8, s12
-; GFX9-NEXT:    s_mul_i32 s14, s8, s12
-; GFX9-NEXT:    s_mul_hi_u32 s8, s8, s15
-; GFX9-NEXT:    s_add_u32 s8, s8, s14
-; GFX9-NEXT:    s_addc_u32 s9, 0, s9
-; GFX9-NEXT:    s_mul_hi_u32 s16, s13, s15
-; GFX9-NEXT:    s_mul_i32 s15, s13, s15
-; GFX9-NEXT:    s_add_u32 s8, s8, s15
-; GFX9-NEXT:    s_mul_hi_u32 s14, s13, s12
-; GFX9-NEXT:    s_addc_u32 s8, s9, s16
-; GFX9-NEXT:    s_addc_u32 s9, s14, 0
-; GFX9-NEXT:    s_mul_i32 s12, s13, s12
-; GFX9-NEXT:    s_add_u32 s8, s8, s12
-; GFX9-NEXT:    s_addc_u32 s9, 0, s9
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s8, v2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s8, s13, s9
-; GFX9-NEXT:    v_readfirstlane_b32 s12, v2
-; GFX9-NEXT:    s_mul_i32 s9, s0, s8
-; GFX9-NEXT:    s_mul_hi_u32 s13, s0, s12
-; GFX9-NEXT:    s_add_i32 s9, s13, s9
-; GFX9-NEXT:    s_mul_i32 s1, s1, s12
-; GFX9-NEXT:    s_add_i32 s9, s9, s1
-; GFX9-NEXT:    s_mul_i32 s0, s0, s12
-; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s0
-; GFX9-NEXT:    s_mul_i32 s14, s8, s0
-; GFX9-NEXT:    s_mul_i32 s16, s12, s9
-; GFX9-NEXT:    s_mul_hi_u32 s0, s12, s0
-; GFX9-NEXT:    s_mul_hi_u32 s15, s12, s9
-; GFX9-NEXT:    s_add_u32 s0, s0, s16
-; GFX9-NEXT:    s_addc_u32 s12, 0, s15
-; GFX9-NEXT:    s_add_u32 s0, s0, s14
-; GFX9-NEXT:    s_mul_hi_u32 s1, s8, s9
-; GFX9-NEXT:    s_addc_u32 s0, s12, s13
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_mul_i32 s9, s8, s9
-; GFX9-NEXT:    s_add_u32 s0, s0, s9
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s12, s8, s1
-; GFX9-NEXT:    s_ashr_i32 s8, s11, 31
-; GFX9-NEXT:    s_add_u32 s0, s10, s8
-; GFX9-NEXT:    s_mov_b32 s9, s8
-; GFX9-NEXT:    s_addc_u32 s1, s11, s8
-; GFX9-NEXT:    s_xor_b64 s[10:11], s[0:1], s[8:9]
-; GFX9-NEXT:    v_readfirstlane_b32 s13, v2
-; GFX9-NEXT:    s_mul_i32 s1, s10, s12
-; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s13
-; GFX9-NEXT:    s_mul_hi_u32 s0, s10, s12
-; GFX9-NEXT:    s_add_u32 s1, s14, s1
-; GFX9-NEXT:    s_addc_u32 s0, 0, s0
-; GFX9-NEXT:    s_mul_hi_u32 s15, s11, s13
-; GFX9-NEXT:    s_mul_i32 s13, s11, s13
-; GFX9-NEXT:    s_add_u32 s1, s1, s13
-; GFX9-NEXT:    s_mul_hi_u32 s14, s11, s12
-; GFX9-NEXT:    s_addc_u32 s0, s0, s15
-; GFX9-NEXT:    s_addc_u32 s1, s14, 0
-; GFX9-NEXT:    s_mul_i32 s12, s11, s12
-; GFX9-NEXT:    s_add_u32 s12, s0, s12
-; GFX9-NEXT:    s_addc_u32 s13, 0, s1
-; GFX9-NEXT:    s_mul_i32 s0, s6, s13
-; GFX9-NEXT:    s_mul_hi_u32 s1, s6, s12
-; GFX9-NEXT:    s_add_i32 s0, s1, s0
-; GFX9-NEXT:    s_mul_i32 s1, s7, s12
-; GFX9-NEXT:    s_add_i32 s14, s0, s1
-; GFX9-NEXT:    s_mul_i32 s1, s6, s12
-; GFX9-NEXT:    v_mov_b32_e32 v2, s1
-; GFX9-NEXT:    s_sub_i32 s0, s11, s14
-; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, s10, v2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_subb_u32 s10, s0, s7
-; GFX9-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s6, v2
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s10, s10, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s10, s7
-; GFX9-NEXT:    s_cselect_b32 s15, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s6, v3
-; GFX9-NEXT:    s_cmp_eq_u32 s10, s7
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v5, s15
-; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v3, s[0:1]
-; GFX9-NEXT:    s_add_u32 s0, s12, 1
-; GFX9-NEXT:    s_addc_u32 s10, s13, 0
-; GFX9-NEXT:    s_add_u32 s1, s12, 2
-; GFX9-NEXT:    s_addc_u32 s15, s13, 0
-; GFX9-NEXT:    v_mov_b32_e32 v5, s0
-; GFX9-NEXT:    v_mov_b32_e32 v6, s1
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v6, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v5, s10
-; GFX9-NEXT:    v_mov_b32_e32 v6, s15
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v5, v6, s[0:1]
-; GFX9-NEXT:    s_subb_u32 s0, s11, s14
-; GFX9-NEXT:    s_cmp_ge_u32 s0, s7
-; GFX9-NEXT:    s_cselect_b32 s1, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s6, v2
-; GFX9-NEXT:    s_cmp_eq_u32 s0, s7
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v6, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v6, v2, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v6, s13
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v6, v5, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v5, s12
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; GFX9-NEXT:    s_xor_b64 s[0:1], s[8:9], s[4:5]
-; GFX9-NEXT:    v_xor_b32_e32 v3, s0, v3
-; GFX9-NEXT:    v_xor_b32_e32 v5, s1, v2
-; GFX9-NEXT:    v_mov_b32_e32 v6, s1
-; GFX9-NEXT:    v_subrev_co_u32_e32 v2, vcc, s0, v3
-; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v6, vcc
-; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[2:3]
+; GFX9-NEXT:    s_add_u32 s20, s16, 2
+; GFX9-NEXT:    s_addc_u32 s21, s17, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX9-NEXT:    s_cselect_b32 s12, s20, s13
+; GFX9-NEXT:    s_cselect_b32 s13, s21, s19
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s9, s9, s18
+; GFX9-NEXT:    s_cmp_ge_u32 s9, s7
+; GFX9-NEXT:    s_cselect_b32 s10, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s8, s6
+; GFX9-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s9, s7
+; GFX9-NEXT:    s_cselect_b32 s6, s6, s10
+; GFX9-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX9-NEXT:    s_cselect_b32 s7, s13, s17
+; GFX9-NEXT:    s_cselect_b32 s6, s12, s16
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[4:5], s[2:3]
+; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[2:3]
+; GFX9-NEXT:    s_sub_u32 s2, s4, s2
+; GFX9-NEXT:    s_subb_u32 s3, s5, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s14
+; GFX9-NEXT:    v_mov_b32_e32 v2, s15
+; GFX9-NEXT:    v_mov_b32_e32 v3, s2
+; GFX9-NEXT:    v_mov_b32_e32 v4, s3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_store_dwordx4 v0, v[1:4], s[0:1]
 ; GFX9-NEXT:    s_endpgm
   %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y
   %r = sdiv <2 x i64> %x, %shl.y
@@ -8983,8 +9060,7 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-LABEL: srem_i64_pow2_shl_denom:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xd
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
+; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    s_lshl_b64 s[0:1], 0x1000, s0
 ; GFX6-NEXT:    s_ashr_i32 s2, s1, 31
@@ -8994,130 +9070,167 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_xor_b64 s[8:9], s[0:1], s[2:3]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GFX6-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GFX6-NEXT:    s_sub_u32 s4, 0, s8
-; GFX6-NEXT:    s_subb_u32 s5, 0, s9
+; GFX6-NEXT:    s_sub_u32 s10, 0, s8
+; GFX6-NEXT:    s_subb_u32 s11, 0, s9
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_ashr_i32 s10, s3, 31
-; GFX6-NEXT:    s_add_u32 s2, s2, s10
-; GFX6-NEXT:    s_mov_b32 s11, s10
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT:    s_addc_u32 s3, s3, s10
-; GFX6-NEXT:    s_xor_b64 s[12:13], s[2:3], s[10:11]
-; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GFX6-NEXT:    v_mul_lo_u32 v5, s5, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s4, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GFX6-NEXT:    v_mul_lo_u32 v6, v1, v4
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GFX6-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s5, v0
-; GFX6-NEXT:    s_mov_b32 s5, s1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s4, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s12, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s12, v0
-; GFX6-NEXT:    v_mul_hi_u32 v4, s12, v1
-; GFX6-NEXT:    v_mul_hi_u32 v5, s13, v1
-; GFX6-NEXT:    v_mul_lo_u32 v1, s13, v1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, s13, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s13, v0
-; GFX6-NEXT:    s_mov_b32 s4, s0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v1, s8, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s8, v0
-; GFX6-NEXT:    v_mul_lo_u32 v3, s9, v0
-; GFX6-NEXT:    v_mul_lo_u32 v0, s8, v0
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s13, v1
-; GFX6-NEXT:    v_mov_b32_e32 v3, s9
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s12, v0
-; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GFX6-NEXT:    v_subrev_i32_e64 v4, s[0:1], s8, v0
-; GFX6-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v5
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v4
-; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], s9, v5
-; GFX6-NEXT:    v_subrev_i32_e64 v3, s[0:1], s8, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GFX6-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GFX6-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GFX6-NEXT:    v_mov_b32_e32 v4, s13
-; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
-; GFX6-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
-; GFX6-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT:    v_xor_b32_e32 v0, s10, v0
-; GFX6-NEXT:    v_xor_b32_e32 v1, s10, v1
-; GFX6-NEXT:    v_mov_b32_e32 v2, s10
-; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s10, v0
-; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX6-NEXT:    v_mul_hi_u32 v2, s10, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    s_mul_i32 s1, s10, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v2
+; GFX6-NEXT:    s_mul_i32 s13, s11, s0
+; GFX6-NEXT:    s_mul_i32 s14, s10, s0
+; GFX6-NEXT:    s_add_i32 s1, s15, s1
+; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s14
+; GFX6-NEXT:    s_add_i32 s1, s1, s13
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s1
+; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v3
+; GFX6-NEXT:    s_mul_i32 s15, s0, s1
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s1
+; GFX6-NEXT:    s_add_u32 s13, s13, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
+; GFX6-NEXT:    s_addc_u32 s15, 0, s15
+; GFX6-NEXT:    s_mul_i32 s14, s12, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v4
+; GFX6-NEXT:    s_add_u32 s13, s13, s14
+; GFX6-NEXT:    s_addc_u32 s13, s15, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    s_addc_u32 s14, s14, 0
+; GFX6-NEXT:    s_mul_i32 s1, s12, s1
+; GFX6-NEXT:    s_add_u32 s1, s13, s1
+; GFX6-NEXT:    s_addc_u32 s13, 0, s14
+; GFX6-NEXT:    s_add_i32 s14, s0, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s14
+; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
+; GFX6-NEXT:    s_addc_u32 s12, s12, s13
+; GFX6-NEXT:    s_mul_i32 s0, s10, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX6-NEXT:    s_add_i32 s0, s1, s0
+; GFX6-NEXT:    s_mul_i32 s11, s11, s14
+; GFX6-NEXT:    s_mul_i32 s1, s10, s14
+; GFX6-NEXT:    s_add_i32 s0, s0, s11
+; GFX6-NEXT:    v_mov_b32_e32 v2, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s0
+; GFX6-NEXT:    v_mul_hi_u32 v3, s12, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s14, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s12, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s14, v0
+; GFX6-NEXT:    s_mul_i32 s11, s14, s0
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v2
+; GFX6-NEXT:    s_add_u32 s11, s15, s11
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
+; GFX6-NEXT:    s_mul_i32 s1, s12, s1
+; GFX6-NEXT:    s_addc_u32 s13, 0, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s10, v3
+; GFX6-NEXT:    s_add_u32 s1, s11, s1
+; GFX6-NEXT:    s_addc_u32 s1, s13, s10
+; GFX6-NEXT:    v_readfirstlane_b32 s10, v1
+; GFX6-NEXT:    s_addc_u32 s10, s10, 0
+; GFX6-NEXT:    s_mul_i32 s0, s12, s0
+; GFX6-NEXT:    s_add_u32 s0, s1, s0
+; GFX6-NEXT:    s_addc_u32 s10, 0, s10
+; GFX6-NEXT:    s_add_i32 s13, s14, s0
+; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GFX6-NEXT:    s_or_b32 s0, s0, s1
+; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
+; GFX6-NEXT:    s_addc_u32 s12, s12, s10
+; GFX6-NEXT:    s_ashr_i32 s10, s7, 31
+; GFX6-NEXT:    s_add_u32 s0, s6, s10
+; GFX6-NEXT:    s_mov_b32 s11, s10
+; GFX6-NEXT:    s_addc_u32 s1, s7, s10
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[10:11]
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, s13
+; GFX6-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GFX6-NEXT:    s_mov_b32 s0, s4
+; GFX6-NEXT:    v_readfirstlane_b32 s4, v1
+; GFX6-NEXT:    v_mul_hi_u32 v1, s7, v2
+; GFX6-NEXT:    s_mul_i32 s1, s6, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v3
+; GFX6-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GFX6-NEXT:    s_add_u32 s1, s14, s1
+; GFX6-NEXT:    s_addc_u32 s4, 0, s4
+; GFX6-NEXT:    s_mul_i32 s13, s7, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    s_add_u32 s1, s1, s13
+; GFX6-NEXT:    s_addc_u32 s1, s4, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX6-NEXT:    s_addc_u32 s4, s4, 0
+; GFX6-NEXT:    s_mul_i32 s12, s7, s12
+; GFX6-NEXT:    s_add_u32 s12, s1, s12
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GFX6-NEXT:    s_addc_u32 s4, 0, s4
+; GFX6-NEXT:    s_mov_b32 s1, s5
+; GFX6-NEXT:    s_mul_i32 s4, s8, s4
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX6-NEXT:    s_add_i32 s4, s5, s4
+; GFX6-NEXT:    s_mul_i32 s5, s9, s12
+; GFX6-NEXT:    s_add_i32 s13, s4, s5
+; GFX6-NEXT:    s_sub_i32 s14, s7, s13
+; GFX6-NEXT:    s_mul_i32 s4, s8, s12
+; GFX6-NEXT:    s_sub_i32 s6, s6, s4
+; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX6-NEXT:    s_or_b32 s12, s4, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_subb_u32 s14, s14, s9
+; GFX6-NEXT:    s_sub_i32 s15, s6, s8
+; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX6-NEXT:    s_or_b32 s4, s4, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX6-NEXT:    s_subb_u32 s16, s14, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s16, s9
+; GFX6-NEXT:    s_cselect_b32 s5, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s15, s8
+; GFX6-NEXT:    s_cselect_b32 s17, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s16, s9
+; GFX6-NEXT:    s_cselect_b32 s17, s17, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX6-NEXT:    s_subb_u32 s14, s14, s9
+; GFX6-NEXT:    s_sub_i32 s18, s15, s8
+; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX6-NEXT:    s_or_b32 s4, s4, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX6-NEXT:    s_subb_u32 s4, s14, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s17, 0
+; GFX6-NEXT:    s_cselect_b32 s14, s18, s15
+; GFX6-NEXT:    s_cselect_b32 s4, s4, s16
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_subb_u32 s5, s7, s13
+; GFX6-NEXT:    s_cmp_ge_u32 s5, s9
+; GFX6-NEXT:    s_cselect_b32 s7, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s6, s8
+; GFX6-NEXT:    s_cselect_b32 s8, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s5, s9
+; GFX6-NEXT:    s_cselect_b32 s7, s8, s7
+; GFX6-NEXT:    s_cmp_lg_u32 s7, 0
+; GFX6-NEXT:    s_cselect_b32 s5, s4, s5
+; GFX6-NEXT:    s_cselect_b32 s4, s14, s6
+; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[10:11]
+; GFX6-NEXT:    s_sub_u32 s4, s4, s10
+; GFX6-NEXT:    s_subb_u32 s5, s5, s10
+; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    v_mov_b32_e32 v1, s5
+; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: srem_i64_pow2_shl_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x34
-; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_lshl_b64 s[0:1], 0x1000, s0
 ; GFX9-NEXT:    s_ashr_i32 s2, s1, 31
@@ -9127,8 +9240,9 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    s_xor_b64 s[6:7], s[0:1], s[2:3]
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
-; GFX9-NEXT:    s_sub_u32 s0, 0, s6
-; GFX9-NEXT:    s_subb_u32 s1, 0, s7
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT:    s_sub_u32 s8, 0, s6
+; GFX9-NEXT:    s_subb_u32 s9, 0, s7
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
@@ -9138,127 +9252,123 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_madmk_f32 v1, v2, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v1
-; GFX9-NEXT:    s_mul_i32 s4, s0, s2
-; GFX9-NEXT:    s_mul_hi_u32 s12, s0, s3
-; GFX9-NEXT:    s_mul_i32 s5, s1, s3
-; GFX9-NEXT:    s_add_i32 s4, s12, s4
-; GFX9-NEXT:    s_mul_i32 s13, s0, s3
-; GFX9-NEXT:    s_add_i32 s4, s4, s5
-; GFX9-NEXT:    s_mul_hi_u32 s12, s3, s13
-; GFX9-NEXT:    s_mul_hi_u32 s5, s3, s4
-; GFX9-NEXT:    s_mul_i32 s3, s3, s4
-; GFX9-NEXT:    s_add_u32 s3, s12, s3
-; GFX9-NEXT:    s_addc_u32 s5, 0, s5
-; GFX9-NEXT:    s_mul_hi_u32 s14, s2, s13
-; GFX9-NEXT:    s_mul_i32 s13, s2, s13
-; GFX9-NEXT:    s_add_u32 s3, s3, s13
-; GFX9-NEXT:    s_mul_hi_u32 s12, s2, s4
-; GFX9-NEXT:    s_addc_u32 s3, s5, s14
-; GFX9-NEXT:    s_addc_u32 s5, s12, 0
-; GFX9-NEXT:    s_mul_i32 s4, s2, s4
-; GFX9-NEXT:    s_add_u32 s3, s3, s4
-; GFX9-NEXT:    s_addc_u32 s4, 0, s5
-; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s3, v1
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s2, s2, s4
+; GFX9-NEXT:    v_readfirstlane_b32 s10, v2
 ; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX9-NEXT:    s_mul_i32 s3, s0, s2
-; GFX9-NEXT:    s_mul_hi_u32 s5, s0, s4
-; GFX9-NEXT:    s_add_i32 s3, s5, s3
-; GFX9-NEXT:    s_mul_i32 s1, s1, s4
-; GFX9-NEXT:    s_add_i32 s3, s3, s1
-; GFX9-NEXT:    s_mul_i32 s0, s0, s4
-; GFX9-NEXT:    s_mul_hi_u32 s5, s2, s0
-; GFX9-NEXT:    s_mul_i32 s12, s2, s0
-; GFX9-NEXT:    s_mul_i32 s14, s4, s3
-; GFX9-NEXT:    s_mul_hi_u32 s0, s4, s0
-; GFX9-NEXT:    s_mul_hi_u32 s13, s4, s3
-; GFX9-NEXT:    s_add_u32 s0, s0, s14
-; GFX9-NEXT:    s_addc_u32 s4, 0, s13
-; GFX9-NEXT:    s_add_u32 s0, s0, s12
-; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
-; GFX9-NEXT:    s_addc_u32 s0, s4, s5
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_mul_i32 s3, s2, s3
-; GFX9-NEXT:    s_add_u32 s0, s0, s3
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v1, vcc, s0, v1
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s2, s2, s1
-; GFX9-NEXT:    s_ashr_i32 s4, s11, 31
-; GFX9-NEXT:    s_add_u32 s0, s10, s4
+; GFX9-NEXT:    s_mul_i32 s5, s8, s10
+; GFX9-NEXT:    s_mul_hi_u32 s12, s8, s4
+; GFX9-NEXT:    s_mul_i32 s11, s9, s4
+; GFX9-NEXT:    s_add_i32 s5, s12, s5
+; GFX9-NEXT:    s_mul_i32 s13, s8, s4
+; GFX9-NEXT:    s_add_i32 s5, s5, s11
+; GFX9-NEXT:    s_mul_hi_u32 s12, s4, s13
+; GFX9-NEXT:    s_mul_i32 s14, s4, s5
+; GFX9-NEXT:    s_mul_hi_u32 s11, s4, s5
+; GFX9-NEXT:    s_add_u32 s12, s12, s14
+; GFX9-NEXT:    s_addc_u32 s11, 0, s11
+; GFX9-NEXT:    s_mul_hi_u32 s15, s10, s13
+; GFX9-NEXT:    s_mul_i32 s13, s10, s13
+; GFX9-NEXT:    s_add_u32 s12, s12, s13
+; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s5
+; GFX9-NEXT:    s_addc_u32 s11, s11, s15
+; GFX9-NEXT:    s_addc_u32 s12, s14, 0
+; GFX9-NEXT:    s_mul_i32 s5, s10, s5
+; GFX9-NEXT:    s_add_u32 s5, s11, s5
+; GFX9-NEXT:    s_addc_u32 s11, 0, s12
+; GFX9-NEXT:    s_add_i32 s12, s4, s5
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s10, s10, s11
+; GFX9-NEXT:    s_mul_i32 s4, s8, s10
+; GFX9-NEXT:    s_mul_hi_u32 s5, s8, s12
+; GFX9-NEXT:    s_add_i32 s4, s5, s4
+; GFX9-NEXT:    s_mul_i32 s9, s9, s12
+; GFX9-NEXT:    s_add_i32 s4, s4, s9
+; GFX9-NEXT:    s_mul_i32 s8, s8, s12
+; GFX9-NEXT:    s_mul_hi_u32 s9, s10, s8
+; GFX9-NEXT:    s_mul_i32 s11, s10, s8
+; GFX9-NEXT:    s_mul_i32 s14, s12, s4
+; GFX9-NEXT:    s_mul_hi_u32 s8, s12, s8
+; GFX9-NEXT:    s_mul_hi_u32 s13, s12, s4
+; GFX9-NEXT:    s_add_u32 s8, s8, s14
+; GFX9-NEXT:    s_addc_u32 s13, 0, s13
+; GFX9-NEXT:    s_add_u32 s8, s8, s11
+; GFX9-NEXT:    s_mul_hi_u32 s5, s10, s4
+; GFX9-NEXT:    s_addc_u32 s8, s13, s9
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
+; GFX9-NEXT:    s_mul_i32 s4, s10, s4
+; GFX9-NEXT:    s_add_u32 s4, s8, s4
+; GFX9-NEXT:    s_addc_u32 s8, 0, s5
+; GFX9-NEXT:    s_add_i32 s12, s12, s4
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s8, s10, s8
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
+; GFX9-NEXT:    s_add_u32 s2, s2, s4
 ; GFX9-NEXT:    s_mov_b32 s5, s4
-; GFX9-NEXT:    s_addc_u32 s1, s11, s4
-; GFX9-NEXT:    s_xor_b64 s[10:11], s[0:1], s[4:5]
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v1
-; GFX9-NEXT:    s_mul_i32 s1, s10, s2
-; GFX9-NEXT:    s_mul_hi_u32 s5, s10, s3
-; GFX9-NEXT:    s_mul_hi_u32 s0, s10, s2
-; GFX9-NEXT:    s_add_u32 s1, s5, s1
-; GFX9-NEXT:    s_addc_u32 s0, 0, s0
-; GFX9-NEXT:    s_mul_hi_u32 s12, s11, s3
-; GFX9-NEXT:    s_mul_i32 s3, s11, s3
-; GFX9-NEXT:    s_add_u32 s1, s1, s3
-; GFX9-NEXT:    s_mul_hi_u32 s5, s11, s2
-; GFX9-NEXT:    s_addc_u32 s0, s0, s12
-; GFX9-NEXT:    s_addc_u32 s1, s5, 0
-; GFX9-NEXT:    s_mul_i32 s2, s11, s2
-; GFX9-NEXT:    s_add_u32 s0, s0, s2
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    s_mul_i32 s1, s6, s1
-; GFX9-NEXT:    s_mul_hi_u32 s2, s6, s0
-; GFX9-NEXT:    s_add_i32 s1, s2, s1
-; GFX9-NEXT:    s_mul_i32 s2, s7, s0
-; GFX9-NEXT:    s_mul_i32 s0, s6, s0
-; GFX9-NEXT:    s_add_i32 s5, s1, s2
-; GFX9-NEXT:    v_mov_b32_e32 v1, s0
-; GFX9-NEXT:    s_sub_i32 s1, s11, s5
-; GFX9-NEXT:    v_sub_co_u32_e32 v1, vcc, s10, v1
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_subb_u32 s10, s1, s7
-; GFX9-NEXT:    v_subrev_co_u32_e64 v2, s[0:1], s6, v1
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s12, s10, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s12, s7
-; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[2:3], s6, v2
-; GFX9-NEXT:    s_cmp_eq_u32 s12, s7
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v4, s13
-; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[2:3]
-; GFX9-NEXT:    s_subb_u32 s2, s10, s7
-; GFX9-NEXT:    v_subrev_co_u32_e64 v4, s[0:1], s6, v2
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s2, s2, 0
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v4, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v3, s12
-; GFX9-NEXT:    v_mov_b32_e32 v4, s2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[0:1]
-; GFX9-NEXT:    s_subb_u32 s0, s11, s5
-; GFX9-NEXT:    s_cmp_ge_u32 s0, s7
-; GFX9-NEXT:    s_cselect_b32 s1, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s6, v1
-; GFX9-NEXT:    s_cmp_eq_u32 s0, s7
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v5, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v4, v5, v4, vcc
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX9-NEXT:    v_mov_b32_e32 v5, s0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; GFX9-NEXT:    v_xor_b32_e32 v1, s4, v1
-; GFX9-NEXT:    v_xor_b32_e32 v2, s4, v3
-; GFX9-NEXT:    v_mov_b32_e32 v3, s4
-; GFX9-NEXT:    v_subrev_co_u32_e32 v1, vcc, s4, v1
-; GFX9-NEXT:    v_subb_co_u32_e32 v2, vcc, v2, v3, vcc
-; GFX9-NEXT:    global_store_dwordx2 v0, v[1:2], s[8:9]
+; GFX9-NEXT:    s_addc_u32 s3, s3, s4
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT:    s_mul_i32 s10, s2, s8
+; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s12
+; GFX9-NEXT:    s_mul_hi_u32 s9, s2, s8
+; GFX9-NEXT:    s_add_u32 s10, s11, s10
+; GFX9-NEXT:    s_addc_u32 s9, 0, s9
+; GFX9-NEXT:    s_mul_hi_u32 s13, s3, s12
+; GFX9-NEXT:    s_mul_i32 s12, s3, s12
+; GFX9-NEXT:    s_add_u32 s10, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s11, s3, s8
+; GFX9-NEXT:    s_addc_u32 s9, s9, s13
+; GFX9-NEXT:    s_addc_u32 s10, s11, 0
+; GFX9-NEXT:    s_mul_i32 s8, s3, s8
+; GFX9-NEXT:    s_add_u32 s8, s9, s8
+; GFX9-NEXT:    s_addc_u32 s9, 0, s10
+; GFX9-NEXT:    s_mul_i32 s9, s6, s9
+; GFX9-NEXT:    s_mul_hi_u32 s10, s6, s8
+; GFX9-NEXT:    s_add_i32 s9, s10, s9
+; GFX9-NEXT:    s_mul_i32 s10, s7, s8
+; GFX9-NEXT:    s_add_i32 s12, s9, s10
+; GFX9-NEXT:    s_sub_i32 s10, s3, s12
+; GFX9-NEXT:    s_mul_i32 s8, s6, s8
+; GFX9-NEXT:    s_sub_i32 s2, s2, s8
+; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_subb_u32 s13, s10, s7
+; GFX9-NEXT:    s_sub_i32 s14, s2, s6
+; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s15, s13, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s15, s7
+; GFX9-NEXT:    s_cselect_b32 s16, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s14, s6
+; GFX9-NEXT:    s_cselect_b32 s17, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s15, s7
+; GFX9-NEXT:    s_cselect_b32 s16, s17, s16
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s13, s13, s7
+; GFX9-NEXT:    s_sub_i32 s17, s14, s6
+; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s10, s13, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s16, 0
+; GFX9-NEXT:    s_cselect_b32 s11, s17, s14
+; GFX9-NEXT:    s_cselect_b32 s10, s10, s15
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_subb_u32 s3, s3, s12
+; GFX9-NEXT:    s_cmp_ge_u32 s3, s7
+; GFX9-NEXT:    s_cselect_b32 s8, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s2, s6
+; GFX9-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s3, s7
+; GFX9-NEXT:    s_cselect_b32 s6, s6, s8
+; GFX9-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX9-NEXT:    s_cselect_b32 s3, s10, s3
+; GFX9-NEXT:    s_cselect_b32 s2, s11, s2
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT:    s_sub_u32 s2, s2, s4
+; GFX9-NEXT:    s_subb_u32 s3, s3, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s2
+; GFX9-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-NEXT:    global_store_dwordx2 v0, v[1:2], s[0:1]
 ; GFX9-NEXT:    s_endpgm
   %shl.y = shl i64 4096, %y
   %r = srem i64 %x, %shl.y
@@ -9353,272 +9463,347 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-LABEL: srem_v2i64_pow2_shl_denom:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0xd
-; GFX6-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x9
-; GFX6-NEXT:    s_mov_b32 s7, 0xf000
-; GFX6-NEXT:    s_mov_b32 s6, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NEXT:    s_lshl_b64 s[0:1], 0x1000, s12
-; GFX6-NEXT:    s_lshl_b64 s[16:17], 0x1000, s14
+; GFX6-NEXT:    s_lshl_b64 s[2:3], 0x1000, s12
+; GFX6-NEXT:    s_lshl_b64 s[0:1], 0x1000, s14
+; GFX6-NEXT:    s_ashr_i32 s6, s3, 31
+; GFX6-NEXT:    s_add_u32 s2, s2, s6
+; GFX6-NEXT:    s_mov_b32 s7, s6
+; GFX6-NEXT:    s_addc_u32 s3, s3, s6
+; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s3
+; GFX6-NEXT:    s_sub_u32 s12, 0, s2
+; GFX6-NEXT:    s_subb_u32 s13, 0, s3
+; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
+; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
+; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
+; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX6-NEXT:    s_mul_i32 s7, s12, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
+; GFX6-NEXT:    s_mul_i32 s15, s13, s6
+; GFX6-NEXT:    s_mul_i32 s16, s12, s6
+; GFX6-NEXT:    s_add_i32 s7, s17, s7
+; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s16
+; GFX6-NEXT:    s_add_i32 s7, s7, s15
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s7
+; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v3
+; GFX6-NEXT:    s_mul_i32 s18, s6, s7
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s7
+; GFX6-NEXT:    s_add_u32 s15, s15, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v0
+; GFX6-NEXT:    s_mul_i32 s16, s14, s16
+; GFX6-NEXT:    s_addc_u32 s18, 0, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v4
+; GFX6-NEXT:    s_add_u32 s15, s15, s16
+; GFX6-NEXT:    s_addc_u32 s15, s18, s17
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    s_addc_u32 s16, s16, 0
+; GFX6-NEXT:    s_mul_i32 s7, s14, s7
+; GFX6-NEXT:    s_add_u32 s7, s15, s7
+; GFX6-NEXT:    s_addc_u32 s15, 0, s16
+; GFX6-NEXT:    s_add_i32 s16, s6, s7
+; GFX6-NEXT:    v_mov_b32_e32 v0, s16
+; GFX6-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT:    s_or_b32 s6, s6, s7
+; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX6-NEXT:    s_addc_u32 s14, s14, s15
+; GFX6-NEXT:    s_mul_i32 s6, s12, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s7, v0
+; GFX6-NEXT:    s_add_i32 s6, s7, s6
+; GFX6-NEXT:    s_mul_i32 s13, s13, s16
+; GFX6-NEXT:    s_mul_i32 s7, s12, s16
+; GFX6-NEXT:    s_add_i32 s6, s6, s13
+; GFX6-NEXT:    v_mov_b32_e32 v2, s7
+; GFX6-NEXT:    v_mov_b32_e32 v0, s6
+; GFX6-NEXT:    v_mul_hi_u32 v3, s14, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s16, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s14, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s16, v0
+; GFX6-NEXT:    s_mul_i32 s13, s16, s6
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
+; GFX6-NEXT:    s_add_u32 s13, s17, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
+; GFX6-NEXT:    s_mul_i32 s7, s14, s7
+; GFX6-NEXT:    s_addc_u32 s15, 0, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
+; GFX6-NEXT:    s_add_u32 s7, s13, s7
+; GFX6-NEXT:    s_addc_u32 s7, s15, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    s_addc_u32 s12, s12, 0
+; GFX6-NEXT:    s_mul_i32 s6, s14, s6
+; GFX6-NEXT:    s_add_u32 s6, s7, s6
+; GFX6-NEXT:    s_addc_u32 s12, 0, s12
+; GFX6-NEXT:    s_add_i32 s13, s16, s6
+; GFX6-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GFX6-NEXT:    s_or_b32 s6, s6, s7
+; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX6-NEXT:    s_addc_u32 s12, s14, s12
+; GFX6-NEXT:    s_ashr_i32 s6, s9, 31
+; GFX6-NEXT:    s_add_u32 s8, s8, s6
+; GFX6-NEXT:    s_mov_b32 s7, s6
+; GFX6-NEXT:    s_addc_u32 s9, s9, s6
+; GFX6-NEXT:    s_xor_b64 s[8:9], s[8:9], s[6:7]
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mul_hi_u32 v1, s8, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, s13
+; GFX6-NEXT:    v_mul_hi_u32 v3, s8, v2
+; GFX6-NEXT:    s_mul_i32 s14, s8, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v1
+; GFX6-NEXT:    v_mul_hi_u32 v1, s9, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v3
+; GFX6-NEXT:    v_mul_hi_u32 v0, s9, v0
+; GFX6-NEXT:    s_add_u32 s14, s16, s14
+; GFX6-NEXT:    s_addc_u32 s15, 0, s15
+; GFX6-NEXT:    s_mul_i32 s13, s9, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    s_add_u32 s13, s14, s13
+; GFX6-NEXT:    s_addc_u32 s13, s15, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
+; GFX6-NEXT:    s_addc_u32 s14, s14, 0
+; GFX6-NEXT:    s_mul_i32 s12, s9, s12
+; GFX6-NEXT:    s_add_u32 s12, s13, s12
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GFX6-NEXT:    s_addc_u32 s13, 0, s14
+; GFX6-NEXT:    s_mul_i32 s13, s2, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
+; GFX6-NEXT:    s_add_i32 s13, s14, s13
+; GFX6-NEXT:    s_mul_i32 s14, s3, s12
+; GFX6-NEXT:    s_add_i32 s14, s13, s14
+; GFX6-NEXT:    s_sub_i32 s15, s9, s14
+; GFX6-NEXT:    s_mul_i32 s12, s2, s12
+; GFX6-NEXT:    s_sub_i32 s8, s8, s12
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_or_b32 s16, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
+; GFX6-NEXT:    s_subb_u32 s15, s15, s3
+; GFX6-NEXT:    s_sub_i32 s17, s8, s2
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_or_b32 s12, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_subb_u32 s18, s15, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s18, s3
+; GFX6-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s17, s2
+; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s18, s3
+; GFX6-NEXT:    s_cselect_b32 s19, s19, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_subb_u32 s15, s15, s3
+; GFX6-NEXT:    s_sub_i32 s20, s17, s2
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_or_b32 s12, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_subb_u32 s12, s15, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s19, 0
+; GFX6-NEXT:    s_cselect_b32 s13, s20, s17
+; GFX6-NEXT:    s_cselect_b32 s12, s12, s18
+; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
+; GFX6-NEXT:    s_subb_u32 s9, s9, s14
+; GFX6-NEXT:    s_cmp_ge_u32 s9, s3
+; GFX6-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s8, s2
+; GFX6-NEXT:    s_cselect_b32 s2, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s9, s3
+; GFX6-NEXT:    s_cselect_b32 s2, s2, s14
+; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX6-NEXT:    s_cselect_b32 s3, s12, s9
+; GFX6-NEXT:    s_cselect_b32 s2, s13, s8
+; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX6-NEXT:    s_sub_u32 s12, s2, s6
+; GFX6-NEXT:    s_subb_u32 s13, s3, s6
 ; GFX6-NEXT:    s_ashr_i32 s2, s1, 31
 ; GFX6-NEXT:    s_add_u32 s0, s0, s2
 ; GFX6-NEXT:    s_mov_b32 s3, s2
 ; GFX6-NEXT:    s_addc_u32 s1, s1, s2
-; GFX6-NEXT:    s_xor_b64 s[14:15], s[0:1], s[2:3]
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s14
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s15
-; GFX6-NEXT:    s_sub_u32 s0, 0, s14
-; GFX6-NEXT:    s_subb_u32 s1, 0, s15
-; GFX6-NEXT:    s_ashr_i32 s12, s9, 31
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[2:3]
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GFX6-NEXT:    s_sub_u32 s8, 0, s6
+; GFX6-NEXT:    s_subb_u32 s9, 0, s7
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX6-NEXT:    s_mov_b32 s13, s12
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v5, s1, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s0, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GFX6-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GFX6-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GFX6-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GFX6-NEXT:    v_mul_lo_u32 v4, s1, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s0, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GFX6-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GFX6-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    s_add_u32 s0, s8, s12
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    s_addc_u32 s1, s9, s12
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    s_xor_b64 s[8:9], s[0:1], s[12:13]
-; GFX6-NEXT:    v_mul_lo_u32 v2, s8, v1
-; GFX6-NEXT:    v_mul_hi_u32 v3, s8, v0
-; GFX6-NEXT:    v_mul_hi_u32 v4, s8, v1
-; GFX6-NEXT:    v_mul_hi_u32 v5, s9, v1
-; GFX6-NEXT:    v_mul_lo_u32 v1, s9, v1
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, s9, v0
+; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GFX6-NEXT:    v_mul_hi_u32 v2, s8, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX6-NEXT:    s_mul_i32 s1, s8, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s3, v2
+; GFX6-NEXT:    s_mul_i32 s0, s9, s2
+; GFX6-NEXT:    s_add_i32 s1, s3, s1
+; GFX6-NEXT:    s_add_i32 s3, s1, s0
+; GFX6-NEXT:    s_mul_i32 s15, s8, s2
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s3
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s15
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GFX6-NEXT:    s_mul_i32 s4, s2, s3
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, v1, s15
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s3
+; GFX6-NEXT:    s_add_u32 s4, s16, s4
+; GFX6-NEXT:    s_addc_u32 s5, 0, s5
+; GFX6-NEXT:    s_mul_i32 s15, s14, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
+; GFX6-NEXT:    s_add_u32 s4, s4, s15
+; GFX6-NEXT:    s_addc_u32 s4, s5, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX6-NEXT:    s_addc_u32 s5, s5, 0
+; GFX6-NEXT:    s_mul_i32 s3, s14, s3
+; GFX6-NEXT:    s_add_u32 s3, s4, s3
+; GFX6-NEXT:    s_addc_u32 s4, 0, s5
+; GFX6-NEXT:    s_add_i32 s5, s2, s3
+; GFX6-NEXT:    v_mov_b32_e32 v0, s5
+; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX6-NEXT:    s_addc_u32 s4, s14, s4
+; GFX6-NEXT:    s_mul_i32 s2, s8, s4
+; GFX6-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX6-NEXT:    s_add_i32 s2, s3, s2
+; GFX6-NEXT:    s_mul_i32 s9, s9, s5
+; GFX6-NEXT:    s_mul_i32 s3, s8, s5
+; GFX6-NEXT:    s_add_i32 s2, s2, s9
+; GFX6-NEXT:    v_mov_b32_e32 v2, s3
+; GFX6-NEXT:    v_mov_b32_e32 v0, s2
+; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s5, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s4, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
+; GFX6-NEXT:    s_mul_i32 s9, s5, s2
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v2
+; GFX6-NEXT:    s_add_u32 s9, s15, s9
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
+; GFX6-NEXT:    s_mul_i32 s3, s4, s3
+; GFX6-NEXT:    s_addc_u32 s14, 0, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s8, v3
+; GFX6-NEXT:    s_add_u32 s3, s9, s3
+; GFX6-NEXT:    s_addc_u32 s3, s14, s8
+; GFX6-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX6-NEXT:    s_addc_u32 s8, s8, 0
+; GFX6-NEXT:    s_mul_i32 s2, s4, s2
+; GFX6-NEXT:    s_add_u32 s2, s3, s2
+; GFX6-NEXT:    s_addc_u32 s8, 0, s8
+; GFX6-NEXT:    s_add_i32 s14, s5, s2
+; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
+; GFX6-NEXT:    s_or_b32 s2, s2, s3
+; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX6-NEXT:    s_addc_u32 s15, s4, s8
+; GFX6-NEXT:    s_ashr_i32 s4, s11, 31
+; GFX6-NEXT:    s_add_u32 s2, s10, s4
+; GFX6-NEXT:    s_mov_b32 s5, s4
+; GFX6-NEXT:    s_addc_u32 s3, s11, s4
+; GFX6-NEXT:    s_xor_b64 s[8:9], s[2:3], s[4:5]
+; GFX6-NEXT:    v_mov_b32_e32 v0, s15
+; GFX6-NEXT:    v_mul_hi_u32 v1, s8, v0
+; GFX6-NEXT:    v_mov_b32_e32 v2, s14
+; GFX6-NEXT:    v_mul_hi_u32 v3, s8, v2
+; GFX6-NEXT:    s_mul_i32 s2, s8, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s10, v1
+; GFX6-NEXT:    v_mul_hi_u32 v1, s9, v2
+; GFX6-NEXT:    v_readfirstlane_b32 s11, v3
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s9, v0
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v1, s14, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s14, v0
-; GFX6-NEXT:    v_mul_lo_u32 v3, s15, v0
-; GFX6-NEXT:    v_mul_lo_u32 v0, s14, v0
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GFX6-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s9, v1
-; GFX6-NEXT:    v_mov_b32_e32 v3, s15
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
-; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GFX6-NEXT:    v_subrev_i32_e64 v4, s[0:1], s14, v0
-; GFX6-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s15, v5
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s14, v4
-; GFX6-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], s15, v5
-; GFX6-NEXT:    v_subrev_i32_e64 v3, s[0:1], s14, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GFX6-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GFX6-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GFX6-NEXT:    s_ashr_i32 s0, s17, 31
-; GFX6-NEXT:    s_add_u32 s2, s16, s0
-; GFX6-NEXT:    s_mov_b32 s1, s0
-; GFX6-NEXT:    s_addc_u32 s3, s17, s0
-; GFX6-NEXT:    v_mov_b32_e32 v4, s9
-; GFX6-NEXT:    s_xor_b64 s[8:9], s[2:3], s[0:1]
-; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v4, s8
-; GFX6-NEXT:    v_cvt_f32_u32_e32 v5, s9
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s15, v1
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s14, v0
-; GFX6-NEXT:    v_mac_f32_e32 v4, 0x4f800000, v5
-; GFX6-NEXT:    v_rcp_f32_e32 v4, v4
-; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s15, v1
-; GFX6-NEXT:    v_cndmask_b32_e32 v5, v6, v7, vcc
-; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GFX6-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GFX6-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v4
-; GFX6-NEXT:    v_mul_f32_e32 v4, 0x2f800000, v2
-; GFX6-NEXT:    v_trunc_f32_e32 v4, v4
-; GFX6-NEXT:    v_mac_f32_e32 v2, 0xcf800000, v4
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GFX6-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; GFX6-NEXT:    s_sub_u32 s0, 0, s8
-; GFX6-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GFX6-NEXT:    v_mul_hi_u32 v3, s0, v2
-; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v4
-; GFX6-NEXT:    s_subb_u32 s1, 0, s9
-; GFX6-NEXT:    v_mul_lo_u32 v6, s1, v2
-; GFX6-NEXT:    s_ashr_i32 s14, s11, 31
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v2
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GFX6-NEXT:    v_mul_lo_u32 v6, v2, v3
-; GFX6-NEXT:    v_mul_hi_u32 v7, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v8, v2, v3
-; GFX6-NEXT:    v_mul_hi_u32 v9, v4, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, v4, v3
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GFX6-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v8, v4, v5
-; GFX6-NEXT:    v_mul_hi_u32 v5, v4, v5
-; GFX6-NEXT:    s_mov_b32 s15, s14
-; GFX6-NEXT:    v_xor_b32_e32 v0, s12, v0
-; GFX6-NEXT:    v_add_i32_e32 v6, vcc, v6, v8
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v7, v5, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v9, vcc
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v4, v5, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, s0, v3
-; GFX6-NEXT:    v_mul_hi_u32 v5, s0, v2
-; GFX6-NEXT:    v_mul_lo_u32 v6, s1, v2
-; GFX6-NEXT:    v_xor_b32_e32 v1, s12, v1
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v5
-; GFX6-NEXT:    v_mul_lo_u32 v5, s0, v2
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v6
-; GFX6-NEXT:    v_mul_lo_u32 v8, v2, v4
-; GFX6-NEXT:    v_mul_hi_u32 v9, v2, v5
-; GFX6-NEXT:    v_mul_hi_u32 v10, v2, v4
-; GFX6-NEXT:    v_mul_hi_u32 v7, v3, v5
-; GFX6-NEXT:    v_mul_lo_u32 v5, v3, v5
-; GFX6-NEXT:    v_mul_hi_u32 v6, v3, v4
-; GFX6-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
-; GFX6-NEXT:    v_addc_u32_e32 v9, vcc, 0, v10, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v4, v3, v4
-; GFX6-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, v9, v7, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v6, vcc
-; GFX6-NEXT:    s_add_u32 s0, s10, s14
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GFX6-NEXT:    s_addc_u32 s1, s11, s14
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v3, v5, vcc
-; GFX6-NEXT:    s_xor_b64 s[10:11], s[0:1], s[14:15]
-; GFX6-NEXT:    v_mul_lo_u32 v4, s10, v3
-; GFX6-NEXT:    v_mul_hi_u32 v5, s10, v2
-; GFX6-NEXT:    v_mul_hi_u32 v7, s10, v3
-; GFX6-NEXT:    v_mul_hi_u32 v8, s11, v3
-; GFX6-NEXT:    v_mul_lo_u32 v3, s11, v3
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
-; GFX6-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v7, s11, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s11, v2
-; GFX6-NEXT:    v_mov_b32_e32 v6, s12
-; GFX6-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
-; GFX6-NEXT:    v_addc_u32_e32 v2, vcc, v5, v2, vcc
-; GFX6-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GFX6-NEXT:    v_mul_lo_u32 v3, s8, v3
-; GFX6-NEXT:    v_mul_hi_u32 v4, s8, v2
-; GFX6-NEXT:    v_mul_lo_u32 v5, s9, v2
-; GFX6-NEXT:    v_subrev_i32_e32 v0, vcc, s12, v0
-; GFX6-NEXT:    v_mul_lo_u32 v2, s8, v2
-; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v6, vcc
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GFX6-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; GFX6-NEXT:    v_sub_i32_e32 v4, vcc, s11, v3
-; GFX6-NEXT:    v_mov_b32_e32 v5, s9
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, s10, v2
-; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GFX6-NEXT:    v_subrev_i32_e64 v6, s[0:1], s8, v2
-; GFX6-NEXT:    v_subbrev_u32_e64 v7, s[2:3], 0, v4, s[0:1]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v7
-; GFX6-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[2:3]
-; GFX6-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v6
-; GFX6-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[2:3]
-; GFX6-NEXT:    v_cmp_eq_u32_e64 s[2:3], s9, v7
-; GFX6-NEXT:    v_subrev_i32_e64 v5, s[0:1], s8, v6
-; GFX6-NEXT:    v_cndmask_b32_e64 v8, v8, v9, s[2:3]
-; GFX6-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GFX6-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v8
-; GFX6-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s[0:1]
-; GFX6-NEXT:    v_mov_b32_e32 v6, s11
-; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v6, v3, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s9, v3
-; GFX6-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_le_u32_e32 vcc, s8, v2
-; GFX6-NEXT:    v_cndmask_b32_e64 v4, v7, v4, s[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
-; GFX6-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v3
-; GFX6-NEXT:    v_cndmask_b32_e32 v6, v6, v7, vcc
-; GFX6-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX6-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
-; GFX6-NEXT:    v_cndmask_b32_e32 v3, v3, v4, vcc
-; GFX6-NEXT:    v_xor_b32_e32 v2, s14, v2
-; GFX6-NEXT:    v_xor_b32_e32 v3, s14, v3
-; GFX6-NEXT:    v_mov_b32_e32 v4, s14
-; GFX6-NEXT:    v_subrev_i32_e32 v2, vcc, s14, v2
-; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
-; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[4:7], 0
+; GFX6-NEXT:    s_add_u32 s2, s11, s2
+; GFX6-NEXT:    s_addc_u32 s10, 0, s10
+; GFX6-NEXT:    s_mul_i32 s11, s9, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    s_add_u32 s2, s2, s11
+; GFX6-NEXT:    s_addc_u32 s2, s10, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s10, v0
+; GFX6-NEXT:    s_addc_u32 s10, s10, 0
+; GFX6-NEXT:    s_mul_i32 s11, s9, s15
+; GFX6-NEXT:    s_add_u32 s11, s2, s11
+; GFX6-NEXT:    v_mov_b32_e32 v0, s11
+; GFX6-NEXT:    v_mul_hi_u32 v0, s6, v0
+; GFX6-NEXT:    s_addc_u32 s10, 0, s10
+; GFX6-NEXT:    s_mul_i32 s10, s6, s10
+; GFX6-NEXT:    s_mov_b32 s3, 0xf000
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
+; GFX6-NEXT:    s_add_i32 s10, s14, s10
+; GFX6-NEXT:    s_mul_i32 s14, s7, s11
+; GFX6-NEXT:    s_add_i32 s14, s10, s14
+; GFX6-NEXT:    s_sub_i32 s15, s9, s14
+; GFX6-NEXT:    s_mul_i32 s10, s6, s11
+; GFX6-NEXT:    s_sub_i32 s8, s8, s10
+; GFX6-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX6-NEXT:    s_or_b32 s16, s10, s11
+; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
+; GFX6-NEXT:    s_subb_u32 s15, s15, s7
+; GFX6-NEXT:    s_sub_i32 s17, s8, s6
+; GFX6-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX6-NEXT:    s_or_b32 s10, s10, s11
+; GFX6-NEXT:    s_cmp_lg_u32 s10, 0
+; GFX6-NEXT:    s_subb_u32 s18, s15, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s18, s7
+; GFX6-NEXT:    s_cselect_b32 s11, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s17, s6
+; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s18, s7
+; GFX6-NEXT:    s_cselect_b32 s19, s19, s11
+; GFX6-NEXT:    s_cmp_lg_u32 s10, 0
+; GFX6-NEXT:    s_subb_u32 s15, s15, s7
+; GFX6-NEXT:    s_sub_i32 s20, s17, s6
+; GFX6-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX6-NEXT:    s_or_b32 s10, s10, s11
+; GFX6-NEXT:    s_cmp_lg_u32 s10, 0
+; GFX6-NEXT:    s_subb_u32 s10, s15, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s19, 0
+; GFX6-NEXT:    s_cselect_b32 s11, s20, s17
+; GFX6-NEXT:    s_cselect_b32 s10, s10, s18
+; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
+; GFX6-NEXT:    s_subb_u32 s9, s9, s14
+; GFX6-NEXT:    s_cmp_ge_u32 s9, s7
+; GFX6-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s8, s6
+; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s9, s7
+; GFX6-NEXT:    s_cselect_b32 s6, s6, s14
+; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX6-NEXT:    s_cselect_b32 s7, s10, s9
+; GFX6-NEXT:    s_cselect_b32 s6, s11, s8
+; GFX6-NEXT:    s_xor_b64 s[6:7], s[6:7], s[4:5]
+; GFX6-NEXT:    s_sub_u32 s5, s6, s4
+; GFX6-NEXT:    s_subb_u32 s4, s7, s4
+; GFX6-NEXT:    s_mov_b32 s2, -1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mov_b32_e32 v1, s13
+; GFX6-NEXT:    v_mov_b32_e32 v2, s5
+; GFX6-NEXT:    v_mov_b32_e32 v3, s4
+; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: srem_v2i64_pow2_shl_denom:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x34
-; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_lshl_b64 s[0:1], 0x1000, s12
-; GFX9-NEXT:    s_lshl_b64 s[14:15], 0x1000, s14
-; GFX9-NEXT:    s_ashr_i32 s2, s1, 31
-; GFX9-NEXT:    s_add_u32 s0, s0, s2
-; GFX9-NEXT:    s_mov_b32 s3, s2
-; GFX9-NEXT:    s_addc_u32 s1, s1, s2
-; GFX9-NEXT:    s_xor_b64 s[12:13], s[0:1], s[2:3]
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s12
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s13
-; GFX9-NEXT:    s_sub_u32 s0, 0, s12
-; GFX9-NEXT:    s_subb_u32 s1, 0, s13
+; GFX9-NEXT:    s_lshl_b64 s[2:3], 0x1000, s12
+; GFX9-NEXT:    s_lshl_b64 s[0:1], 0x1000, s14
+; GFX9-NEXT:    s_ashr_i32 s6, s3, 31
+; GFX9-NEXT:    s_add_u32 s2, s2, s6
+; GFX9-NEXT:    s_mov_b32 s7, s6
+; GFX9-NEXT:    s_addc_u32 s3, s3, s6
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s3
+; GFX9-NEXT:    s_sub_u32 s12, 0, s2
+; GFX9-NEXT:    s_subb_u32 s13, 0, s3
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -9627,264 +9812,257 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s2, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
-; GFX9-NEXT:    s_mul_i32 s4, s0, s2
-; GFX9-NEXT:    s_mul_hi_u32 s16, s0, s3
-; GFX9-NEXT:    s_mul_i32 s5, s1, s3
-; GFX9-NEXT:    s_add_i32 s4, s16, s4
-; GFX9-NEXT:    s_mul_i32 s17, s0, s3
-; GFX9-NEXT:    s_add_i32 s4, s4, s5
-; GFX9-NEXT:    s_mul_hi_u32 s5, s3, s4
-; GFX9-NEXT:    s_mul_i32 s16, s3, s4
-; GFX9-NEXT:    s_mul_hi_u32 s3, s3, s17
-; GFX9-NEXT:    s_add_u32 s3, s3, s16
-; GFX9-NEXT:    s_addc_u32 s5, 0, s5
-; GFX9-NEXT:    s_mul_hi_u32 s18, s2, s17
-; GFX9-NEXT:    s_mul_i32 s17, s2, s17
-; GFX9-NEXT:    s_add_u32 s3, s3, s17
-; GFX9-NEXT:    s_mul_hi_u32 s16, s2, s4
-; GFX9-NEXT:    s_addc_u32 s3, s5, s18
-; GFX9-NEXT:    s_addc_u32 s5, s16, 0
-; GFX9-NEXT:    s_mul_i32 s4, s2, s4
-; GFX9-NEXT:    s_add_u32 s3, s3, s4
-; GFX9-NEXT:    s_addc_u32 s4, 0, s5
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s3, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s2, s2, s4
-; GFX9-NEXT:    v_readfirstlane_b32 s4, v0
-; GFX9-NEXT:    s_mul_i32 s3, s0, s2
-; GFX9-NEXT:    s_mul_hi_u32 s5, s0, s4
-; GFX9-NEXT:    s_add_i32 s3, s5, s3
-; GFX9-NEXT:    s_mul_i32 s1, s1, s4
-; GFX9-NEXT:    s_add_i32 s3, s3, s1
-; GFX9-NEXT:    s_mul_i32 s0, s0, s4
-; GFX9-NEXT:    s_mul_hi_u32 s5, s2, s0
-; GFX9-NEXT:    s_mul_i32 s16, s2, s0
-; GFX9-NEXT:    s_mul_i32 s18, s4, s3
-; GFX9-NEXT:    s_mul_hi_u32 s0, s4, s0
-; GFX9-NEXT:    s_mul_hi_u32 s17, s4, s3
-; GFX9-NEXT:    s_add_u32 s0, s0, s18
-; GFX9-NEXT:    s_addc_u32 s4, 0, s17
-; GFX9-NEXT:    s_add_u32 s0, s0, s16
-; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
-; GFX9-NEXT:    s_addc_u32 s0, s4, s5
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_mul_i32 s3, s2, s3
-; GFX9-NEXT:    s_add_u32 s0, s0, s3
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s2, s2, s1
-; GFX9-NEXT:    s_ashr_i32 s16, s9, 31
-; GFX9-NEXT:    s_add_u32 s0, s8, s16
-; GFX9-NEXT:    s_mov_b32 s17, s16
-; GFX9-NEXT:    s_addc_u32 s1, s9, s16
-; GFX9-NEXT:    s_xor_b64 s[4:5], s[0:1], s[16:17]
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
-; GFX9-NEXT:    s_mul_i32 s1, s4, s2
-; GFX9-NEXT:    s_mul_hi_u32 s8, s4, s3
-; GFX9-NEXT:    s_mul_hi_u32 s0, s4, s2
-; GFX9-NEXT:    s_add_u32 s1, s8, s1
-; GFX9-NEXT:    s_addc_u32 s0, 0, s0
-; GFX9-NEXT:    s_mul_hi_u32 s9, s5, s3
-; GFX9-NEXT:    s_mul_i32 s3, s5, s3
-; GFX9-NEXT:    s_add_u32 s1, s1, s3
-; GFX9-NEXT:    s_mul_hi_u32 s8, s5, s2
-; GFX9-NEXT:    s_addc_u32 s0, s0, s9
-; GFX9-NEXT:    s_addc_u32 s1, s8, 0
-; GFX9-NEXT:    s_mul_i32 s2, s5, s2
+; GFX9-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX9-NEXT:    s_mul_i32 s7, s12, s14
+; GFX9-NEXT:    s_mul_hi_u32 s16, s12, s6
+; GFX9-NEXT:    s_mul_i32 s15, s13, s6
+; GFX9-NEXT:    s_add_i32 s7, s16, s7
+; GFX9-NEXT:    s_mul_i32 s17, s12, s6
+; GFX9-NEXT:    s_add_i32 s7, s7, s15
+; GFX9-NEXT:    s_mul_hi_u32 s16, s6, s17
+; GFX9-NEXT:    s_mul_i32 s18, s6, s7
+; GFX9-NEXT:    s_mul_hi_u32 s15, s6, s7
+; GFX9-NEXT:    s_add_u32 s16, s16, s18
+; GFX9-NEXT:    s_addc_u32 s15, 0, s15
+; GFX9-NEXT:    s_mul_hi_u32 s18, s14, s17
+; GFX9-NEXT:    s_mul_i32 s17, s14, s17
+; GFX9-NEXT:    s_add_u32 s16, s16, s17
+; GFX9-NEXT:    s_mul_hi_u32 s19, s14, s7
+; GFX9-NEXT:    s_addc_u32 s15, s15, s18
+; GFX9-NEXT:    s_addc_u32 s16, s19, 0
+; GFX9-NEXT:    s_mul_i32 s7, s14, s7
+; GFX9-NEXT:    s_add_u32 s7, s15, s7
+; GFX9-NEXT:    s_addc_u32 s15, 0, s16
+; GFX9-NEXT:    s_add_i32 s16, s6, s7
+; GFX9-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9-NEXT:    s_addc_u32 s14, s14, s15
+; GFX9-NEXT:    s_mul_i32 s6, s12, s14
+; GFX9-NEXT:    s_mul_hi_u32 s7, s12, s16
+; GFX9-NEXT:    s_add_i32 s6, s7, s6
+; GFX9-NEXT:    s_mul_i32 s13, s13, s16
+; GFX9-NEXT:    s_add_i32 s6, s6, s13
+; GFX9-NEXT:    s_mul_i32 s12, s12, s16
+; GFX9-NEXT:    s_mul_hi_u32 s13, s14, s12
+; GFX9-NEXT:    s_mul_i32 s15, s14, s12
+; GFX9-NEXT:    s_mul_i32 s18, s16, s6
+; GFX9-NEXT:    s_mul_hi_u32 s12, s16, s12
+; GFX9-NEXT:    s_mul_hi_u32 s17, s16, s6
+; GFX9-NEXT:    s_add_u32 s12, s12, s18
+; GFX9-NEXT:    s_addc_u32 s17, 0, s17
+; GFX9-NEXT:    s_add_u32 s12, s12, s15
+; GFX9-NEXT:    s_mul_hi_u32 s7, s14, s6
+; GFX9-NEXT:    s_addc_u32 s12, s17, s13
+; GFX9-NEXT:    s_addc_u32 s7, s7, 0
+; GFX9-NEXT:    s_mul_i32 s6, s14, s6
+; GFX9-NEXT:    s_add_u32 s6, s12, s6
+; GFX9-NEXT:    s_addc_u32 s12, 0, s7
+; GFX9-NEXT:    s_add_i32 s16, s16, s6
+; GFX9-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX9-NEXT:    s_addc_u32 s12, s14, s12
+; GFX9-NEXT:    s_ashr_i32 s6, s9, 31
+; GFX9-NEXT:    s_add_u32 s8, s8, s6
+; GFX9-NEXT:    s_mov_b32 s7, s6
+; GFX9-NEXT:    s_addc_u32 s9, s9, s6
+; GFX9-NEXT:    s_xor_b64 s[8:9], s[8:9], s[6:7]
+; GFX9-NEXT:    s_mul_i32 s14, s8, s12
+; GFX9-NEXT:    s_mul_hi_u32 s15, s8, s16
+; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s12
+; GFX9-NEXT:    s_add_u32 s14, s15, s14
+; GFX9-NEXT:    s_addc_u32 s13, 0, s13
+; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s16
+; GFX9-NEXT:    s_mul_i32 s16, s9, s16
+; GFX9-NEXT:    s_add_u32 s14, s14, s16
+; GFX9-NEXT:    s_mul_hi_u32 s15, s9, s12
+; GFX9-NEXT:    s_addc_u32 s13, s13, s17
+; GFX9-NEXT:    s_addc_u32 s14, s15, 0
+; GFX9-NEXT:    s_mul_i32 s12, s9, s12
+; GFX9-NEXT:    s_add_u32 s12, s13, s12
+; GFX9-NEXT:    s_addc_u32 s13, 0, s14
+; GFX9-NEXT:    s_mul_i32 s13, s2, s13
+; GFX9-NEXT:    s_mul_hi_u32 s14, s2, s12
+; GFX9-NEXT:    s_add_i32 s13, s14, s13
+; GFX9-NEXT:    s_mul_i32 s14, s3, s12
+; GFX9-NEXT:    s_add_i32 s16, s13, s14
+; GFX9-NEXT:    s_sub_i32 s14, s9, s16
+; GFX9-NEXT:    s_mul_i32 s12, s2, s12
+; GFX9-NEXT:    s_sub_i32 s8, s8, s12
+; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GFX9-NEXT:    s_subb_u32 s17, s14, s3
+; GFX9-NEXT:    s_sub_i32 s18, s8, s2
+; GFX9-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GFX9-NEXT:    s_subb_u32 s19, s17, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s19, s3
+; GFX9-NEXT:    s_cselect_b32 s20, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s18, s2
+; GFX9-NEXT:    s_cselect_b32 s21, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s19, s3
+; GFX9-NEXT:    s_cselect_b32 s20, s21, s20
+; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GFX9-NEXT:    s_subb_u32 s17, s17, s3
+; GFX9-NEXT:    s_sub_i32 s21, s18, s2
+; GFX9-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GFX9-NEXT:    s_subb_u32 s14, s17, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s20, 0
+; GFX9-NEXT:    s_cselect_b32 s15, s21, s18
+; GFX9-NEXT:    s_cselect_b32 s14, s14, s19
+; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GFX9-NEXT:    s_subb_u32 s9, s9, s16
+; GFX9-NEXT:    s_cmp_ge_u32 s9, s3
+; GFX9-NEXT:    s_cselect_b32 s12, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s8, s2
+; GFX9-NEXT:    s_cselect_b32 s2, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s9, s3
+; GFX9-NEXT:    s_cselect_b32 s2, s2, s12
+; GFX9-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s3, s14, s9
+; GFX9-NEXT:    s_cselect_b32 s2, s15, s8
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
+; GFX9-NEXT:    s_sub_u32 s12, s2, s6
+; GFX9-NEXT:    s_subb_u32 s13, s3, s6
+; GFX9-NEXT:    s_ashr_i32 s2, s1, 31
 ; GFX9-NEXT:    s_add_u32 s0, s0, s2
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    s_mul_i32 s1, s12, s1
-; GFX9-NEXT:    s_mul_hi_u32 s2, s12, s0
-; GFX9-NEXT:    s_add_i32 s1, s2, s1
-; GFX9-NEXT:    s_mul_i32 s2, s13, s0
-; GFX9-NEXT:    s_mul_i32 s0, s12, s0
-; GFX9-NEXT:    s_add_i32 s8, s1, s2
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
-; GFX9-NEXT:    s_sub_i32 s1, s5, s8
-; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s4, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_subb_u32 s4, s1, s13
-; GFX9-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s12, v0
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s9, s4, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s9, s13
-; GFX9-NEXT:    s_cselect_b32 s17, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v1
-; GFX9-NEXT:    s_cmp_eq_u32 s9, s13
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, 0, -1, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v3, s17
-; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[2:3]
-; GFX9-NEXT:    s_subb_u32 s2, s4, s13
-; GFX9-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s12, v1
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s2, s2, 0
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s9
-; GFX9-NEXT:    v_mov_b32_e32 v3, s2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GFX9-NEXT:    s_subb_u32 s0, s5, s8
-; GFX9-NEXT:    s_cmp_ge_u32 s0, s13
-; GFX9-NEXT:    s_cselect_b32 s1, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
-; GFX9-NEXT:    s_cmp_eq_u32 s0, s13
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v5, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v5, s0
-; GFX9-NEXT:    s_ashr_i32 s0, s15, 31
-; GFX9-NEXT:    s_add_u32 s2, s14, s0
-; GFX9-NEXT:    s_mov_b32 s1, s0
-; GFX9-NEXT:    s_addc_u32 s3, s15, s0
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; GFX9-NEXT:    s_xor_b64 s[4:5], s[2:3], s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s4
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v3, s5
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
-; GFX9-NEXT:    v_xor_b32_e32 v0, s16, v0
-; GFX9-NEXT:    v_xor_b32_e32 v2, s16, v2
-; GFX9-NEXT:    v_mac_f32_e32 v1, 0x4f800000, v3
-; GFX9-NEXT:    v_rcp_f32_e32 v3, v1
-; GFX9-NEXT:    v_mov_b32_e32 v5, s16
-; GFX9-NEXT:    v_subrev_co_u32_e32 v0, vcc, s16, v0
-; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v2, v5, vcc
-; GFX9-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v3
-; GFX9-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
-; GFX9-NEXT:    v_trunc_f32_e32 v3, v3
-; GFX9-NEXT:    v_mac_f32_e32 v2, 0xcf800000, v3
+; GFX9-NEXT:    s_mov_b32 s3, s2
+; GFX9-NEXT:    s_addc_u32 s1, s1, s2
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[0:1], s[2:3]
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s3
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX9-NEXT:    s_sub_u32 s6, 0, s2
+; GFX9-NEXT:    s_subb_u32 s7, 0, s3
+; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
+; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0
+; GFX9-NEXT:    v_mul_f32_e32 v1, 0x5f7ffffc, v1
+; GFX9-NEXT:    v_mul_f32_e32 v2, 0x2f800000, v1
+; GFX9-NEXT:    v_trunc_f32_e32 v2, v2
+; GFX9-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v2
+; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GFX9-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GFX9-NEXT:    s_sub_u32 s0, 0, s4
-; GFX9-NEXT:    s_subb_u32 s1, 0, s5
-; GFX9-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s9, v3
-; GFX9-NEXT:    s_mul_hi_u32 s8, s0, s2
-; GFX9-NEXT:    s_mul_i32 s12, s0, s9
-; GFX9-NEXT:    s_mul_i32 s3, s1, s2
-; GFX9-NEXT:    s_add_i32 s8, s8, s12
-; GFX9-NEXT:    s_add_i32 s8, s8, s3
-; GFX9-NEXT:    s_mul_i32 s13, s0, s2
-; GFX9-NEXT:    s_mul_hi_u32 s3, s2, s8
-; GFX9-NEXT:    s_mul_i32 s12, s2, s8
-; GFX9-NEXT:    s_mul_hi_u32 s2, s2, s13
-; GFX9-NEXT:    s_add_u32 s2, s2, s12
-; GFX9-NEXT:    s_addc_u32 s3, 0, s3
-; GFX9-NEXT:    s_mul_hi_u32 s14, s9, s13
-; GFX9-NEXT:    s_mul_i32 s13, s9, s13
-; GFX9-NEXT:    s_add_u32 s2, s2, s13
-; GFX9-NEXT:    s_mul_hi_u32 s12, s9, s8
-; GFX9-NEXT:    s_addc_u32 s2, s3, s14
-; GFX9-NEXT:    s_addc_u32 s3, s12, 0
+; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s9, v2
+; GFX9-NEXT:    s_mul_hi_u32 s8, s6, s4
+; GFX9-NEXT:    s_mul_i32 s14, s6, s9
+; GFX9-NEXT:    s_mul_i32 s5, s7, s4
+; GFX9-NEXT:    s_add_i32 s8, s8, s14
+; GFX9-NEXT:    s_add_i32 s8, s8, s5
+; GFX9-NEXT:    s_mul_i32 s15, s6, s4
+; GFX9-NEXT:    s_mul_i32 s14, s4, s8
+; GFX9-NEXT:    s_mul_hi_u32 s16, s4, s15
+; GFX9-NEXT:    s_mul_hi_u32 s5, s4, s8
+; GFX9-NEXT:    s_add_u32 s14, s16, s14
+; GFX9-NEXT:    s_addc_u32 s5, 0, s5
+; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s15
+; GFX9-NEXT:    s_mul_i32 s15, s9, s15
+; GFX9-NEXT:    s_add_u32 s14, s14, s15
+; GFX9-NEXT:    s_mul_hi_u32 s16, s9, s8
+; GFX9-NEXT:    s_addc_u32 s5, s5, s17
+; GFX9-NEXT:    s_addc_u32 s14, s16, 0
 ; GFX9-NEXT:    s_mul_i32 s8, s9, s8
-; GFX9-NEXT:    s_add_u32 s2, s2, s8
-; GFX9-NEXT:    s_addc_u32 s3, 0, s3
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s2, v2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s2, s9, s3
-; GFX9-NEXT:    v_readfirstlane_b32 s8, v2
-; GFX9-NEXT:    s_mul_i32 s3, s0, s2
-; GFX9-NEXT:    s_mul_hi_u32 s9, s0, s8
-; GFX9-NEXT:    s_add_i32 s3, s9, s3
-; GFX9-NEXT:    s_mul_i32 s1, s1, s8
-; GFX9-NEXT:    s_add_i32 s3, s3, s1
-; GFX9-NEXT:    s_mul_i32 s0, s0, s8
-; GFX9-NEXT:    s_mul_hi_u32 s9, s2, s0
-; GFX9-NEXT:    s_mul_i32 s12, s2, s0
-; GFX9-NEXT:    s_mul_i32 s14, s8, s3
-; GFX9-NEXT:    s_mul_hi_u32 s0, s8, s0
-; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s3
-; GFX9-NEXT:    s_add_u32 s0, s0, s14
-; GFX9-NEXT:    s_addc_u32 s8, 0, s13
-; GFX9-NEXT:    s_add_u32 s0, s0, s12
-; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s3
-; GFX9-NEXT:    s_addc_u32 s0, s8, s9
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_mul_i32 s3, s2, s3
-; GFX9-NEXT:    s_add_u32 s0, s0, s3
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s2, s2, s1
-; GFX9-NEXT:    s_ashr_i32 s8, s11, 31
-; GFX9-NEXT:    s_add_u32 s0, s10, s8
-; GFX9-NEXT:    s_mov_b32 s9, s8
-; GFX9-NEXT:    s_addc_u32 s1, s11, s8
-; GFX9-NEXT:    s_xor_b64 s[10:11], s[0:1], s[8:9]
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v2
-; GFX9-NEXT:    s_mul_i32 s1, s10, s2
-; GFX9-NEXT:    s_mul_hi_u32 s9, s10, s3
-; GFX9-NEXT:    s_mul_hi_u32 s0, s10, s2
-; GFX9-NEXT:    s_add_u32 s1, s9, s1
-; GFX9-NEXT:    s_addc_u32 s0, 0, s0
-; GFX9-NEXT:    s_mul_hi_u32 s12, s11, s3
-; GFX9-NEXT:    s_mul_i32 s3, s11, s3
-; GFX9-NEXT:    s_add_u32 s1, s1, s3
-; GFX9-NEXT:    s_mul_hi_u32 s9, s11, s2
-; GFX9-NEXT:    s_addc_u32 s0, s0, s12
-; GFX9-NEXT:    s_addc_u32 s1, s9, 0
-; GFX9-NEXT:    s_mul_i32 s2, s11, s2
-; GFX9-NEXT:    s_add_u32 s0, s0, s2
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    s_mul_i32 s1, s4, s1
-; GFX9-NEXT:    s_mul_hi_u32 s2, s4, s0
-; GFX9-NEXT:    s_add_i32 s1, s2, s1
-; GFX9-NEXT:    s_mul_i32 s2, s5, s0
-; GFX9-NEXT:    s_mul_i32 s0, s4, s0
-; GFX9-NEXT:    s_add_i32 s9, s1, s2
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    s_sub_i32 s1, s11, s9
-; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, s10, v2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_subb_u32 s10, s1, s5
-; GFX9-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s4, v2
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s12, s10, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s12, s5
-; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[2:3], s4, v3
-; GFX9-NEXT:    s_cmp_eq_u32 s12, s5
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[2:3]
-; GFX9-NEXT:    v_mov_b32_e32 v6, s13
-; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v6, v5, s[2:3]
-; GFX9-NEXT:    s_subb_u32 s2, s10, s5
-; GFX9-NEXT:    v_subrev_co_u32_e64 v6, s[0:1], s4, v3
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s2, s2, 0
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v5
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v5, s12
-; GFX9-NEXT:    v_mov_b32_e32 v6, s2
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v5, v6, s[0:1]
-; GFX9-NEXT:    s_subb_u32 s0, s11, s9
-; GFX9-NEXT:    s_cmp_ge_u32 s0, s5
-; GFX9-NEXT:    s_cselect_b32 s1, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s4, v2
-; GFX9-NEXT:    s_cmp_eq_u32 s0, s5
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v7, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v6, v7, v6, vcc
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT:    v_mov_b32_e32 v7, s0
-; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v5, v7, v5, vcc
-; GFX9-NEXT:    v_xor_b32_e32 v2, s8, v2
-; GFX9-NEXT:    v_xor_b32_e32 v3, s8, v5
-; GFX9-NEXT:    v_mov_b32_e32 v5, s8
-; GFX9-NEXT:    v_subrev_co_u32_e32 v2, vcc, s8, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v3, v5, vcc
-; GFX9-NEXT:    global_store_dwordx4 v4, v[0:3], s[6:7]
+; GFX9-NEXT:    s_add_u32 s5, s5, s8
+; GFX9-NEXT:    s_addc_u32 s8, 0, s14
+; GFX9-NEXT:    s_add_i32 s14, s4, s5
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s8, s9, s8
+; GFX9-NEXT:    s_mul_i32 s4, s6, s8
+; GFX9-NEXT:    s_mul_hi_u32 s5, s6, s14
+; GFX9-NEXT:    s_add_i32 s4, s5, s4
+; GFX9-NEXT:    s_mul_i32 s7, s7, s14
+; GFX9-NEXT:    s_add_i32 s4, s4, s7
+; GFX9-NEXT:    s_mul_i32 s6, s6, s14
+; GFX9-NEXT:    s_mul_hi_u32 s7, s8, s6
+; GFX9-NEXT:    s_mul_i32 s9, s8, s6
+; GFX9-NEXT:    s_mul_i32 s16, s14, s4
+; GFX9-NEXT:    s_mul_hi_u32 s6, s14, s6
+; GFX9-NEXT:    s_mul_hi_u32 s15, s14, s4
+; GFX9-NEXT:    s_add_u32 s6, s6, s16
+; GFX9-NEXT:    s_addc_u32 s15, 0, s15
+; GFX9-NEXT:    s_add_u32 s6, s6, s9
+; GFX9-NEXT:    s_mul_hi_u32 s5, s8, s4
+; GFX9-NEXT:    s_addc_u32 s6, s15, s7
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
+; GFX9-NEXT:    s_mul_i32 s4, s8, s4
+; GFX9-NEXT:    s_add_u32 s4, s6, s4
+; GFX9-NEXT:    s_addc_u32 s6, 0, s5
+; GFX9-NEXT:    s_add_i32 s14, s14, s4
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s8, s8, s6
+; GFX9-NEXT:    s_ashr_i32 s4, s11, 31
+; GFX9-NEXT:    s_add_u32 s6, s10, s4
+; GFX9-NEXT:    s_mov_b32 s5, s4
+; GFX9-NEXT:    s_addc_u32 s7, s11, s4
+; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], s[4:5]
+; GFX9-NEXT:    s_mul_i32 s10, s6, s8
+; GFX9-NEXT:    s_mul_hi_u32 s11, s6, s14
+; GFX9-NEXT:    s_mul_hi_u32 s9, s6, s8
+; GFX9-NEXT:    s_add_u32 s10, s11, s10
+; GFX9-NEXT:    s_addc_u32 s9, 0, s9
+; GFX9-NEXT:    s_mul_hi_u32 s15, s7, s14
+; GFX9-NEXT:    s_mul_i32 s14, s7, s14
+; GFX9-NEXT:    s_add_u32 s10, s10, s14
+; GFX9-NEXT:    s_mul_hi_u32 s11, s7, s8
+; GFX9-NEXT:    s_addc_u32 s9, s9, s15
+; GFX9-NEXT:    s_addc_u32 s10, s11, 0
+; GFX9-NEXT:    s_mul_i32 s8, s7, s8
+; GFX9-NEXT:    s_add_u32 s8, s9, s8
+; GFX9-NEXT:    s_addc_u32 s9, 0, s10
+; GFX9-NEXT:    s_mul_i32 s9, s2, s9
+; GFX9-NEXT:    s_mul_hi_u32 s10, s2, s8
+; GFX9-NEXT:    s_add_i32 s9, s10, s9
+; GFX9-NEXT:    s_mul_i32 s10, s3, s8
+; GFX9-NEXT:    s_add_i32 s14, s9, s10
+; GFX9-NEXT:    s_sub_i32 s10, s7, s14
+; GFX9-NEXT:    s_mul_i32 s8, s2, s8
+; GFX9-NEXT:    s_sub_i32 s6, s6, s8
+; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_subb_u32 s15, s10, s3
+; GFX9-NEXT:    s_sub_i32 s16, s6, s2
+; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s17, s15, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s17, s3
+; GFX9-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s16, s2
+; GFX9-NEXT:    s_cselect_b32 s19, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s17, s3
+; GFX9-NEXT:    s_cselect_b32 s18, s19, s18
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s15, s15, s3
+; GFX9-NEXT:    s_sub_i32 s19, s16, s2
+; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s10, s15, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s18, 0
+; GFX9-NEXT:    s_cselect_b32 s11, s19, s16
+; GFX9-NEXT:    s_cselect_b32 s10, s10, s17
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_subb_u32 s7, s7, s14
+; GFX9-NEXT:    s_cmp_ge_u32 s7, s3
+; GFX9-NEXT:    s_cselect_b32 s8, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s6, s2
+; GFX9-NEXT:    s_cselect_b32 s2, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s7, s3
+; GFX9-NEXT:    s_cselect_b32 s2, s2, s8
+; GFX9-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX9-NEXT:    s_cselect_b32 s3, s10, s7
+; GFX9-NEXT:    s_cselect_b32 s2, s11, s6
+; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT:    s_sub_u32 s2, s2, s4
+; GFX9-NEXT:    s_subb_u32 s3, s3, s4
+; GFX9-NEXT:    v_mov_b32_e32 v1, s12
+; GFX9-NEXT:    v_mov_b32_e32 v2, s13
+; GFX9-NEXT:    v_mov_b32_e32 v3, s2
+; GFX9-NEXT:    v_mov_b32_e32 v4, s3
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    global_store_dwordx4 v0, v[1:4], s[0:1]
 ; GFX9-NEXT:    s_endpgm
   %shl.y = shl <2 x i64> <i64 4096, i64 4096>, %y
   %r = srem <2 x i64> %x, %shl.y
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 1158d73c0c152..1211a9f4363fe 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -1961,9 +1961,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   sudiv64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.3
-; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = V_ADD_CO_U32_e64
+; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = S_UADDO_PSEUDO
 ; GCN-ISEL: S_ADD_CO_PSEUDO %{{[0-9]+}}, killed %{{[0-9]+}}, killed %[[CARRY]]
-; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = V_SUB_CO_U32_e64
+; GCN-ISEL: %[[CARRY:[0-9]+]]:sreg_64_xexec = S_USUBO_PSEUDO
 ; GCN-ISEL: S_SUB_CO_PSEUDO killed %{{[0-9]+}}, %{{[0-9]+}}, %[[CARRY]]
 
 define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
@@ -2017,7 +2017,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; CISI-NEXT:    v_mul_lo_u32 v4, s1, v0
 ; CISI-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; CISI-NEXT:    v_mul_lo_u32 v3, s0, v0
-; CISI-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
+; CISI-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
 ; CISI-NEXT:    v_mul_lo_u32 v6, v0, v2
 ; CISI-NEXT:    v_mul_hi_u32 v7, v0, v3
 ; CISI-NEXT:    v_mul_hi_u32 v8, v0, v2
@@ -2120,18 +2120,18 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ;
 ; VI-LABEL: sudiv64:
 ; VI:       ; %bb.0:
-; VI-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; VI-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34
+; VI-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
-; VI-NEXT:    s_or_b64 s[0:1], s[10:11], s[2:3]
-; VI-NEXT:    s_mov_b32 s0, 0
-; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; VI-NEXT:    s_cbranch_scc0 .LBB16_4
+; VI-NEXT:    s_or_b64 s[6:7], s[2:3], s[4:5]
+; VI-NEXT:    s_mov_b32 s6, 0
+; VI-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; VI-NEXT:    s_cbranch_scc0 .LBB16_3
 ; VI-NEXT:  ; %bb.1:
-; VI-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; VI-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; VI-NEXT:    s_sub_u32 s4, 0, s2
-; VI-NEXT:    s_subb_u32 s5, 0, s3
+; VI-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; VI-NEXT:    s_sub_u32 s8, 0, s4
+; VI-NEXT:    s_subb_u32 s9, 0, s5
 ; VI-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; VI-NEXT:    v_rcp_f32_e32 v0, v0
 ; VI-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2140,17 +2140,17 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; VI-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; VI-NEXT:    v_cvt_u32_f32_e32 v4, v1
 ; VI-NEXT:    v_cvt_u32_f32_e32 v5, v0
-; VI-NEXT:    v_mul_lo_u32 v2, s4, v4
-; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s4, v5, 0
-; VI-NEXT:    v_mul_lo_u32 v3, s5, v5
+; VI-NEXT:    v_mul_lo_u32 v2, s8, v4
+; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], s8, v5, 0
+; VI-NEXT:    v_mul_lo_u32 v3, s9, v5
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
 ; VI-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
 ; VI-NEXT:    v_mul_hi_u32 v6, v5, v0
-; VI-NEXT:    v_mad_u64_u32 v[1:2], s[0:1], v5, v3, 0
+; VI-NEXT:    v_mad_u64_u32 v[1:2], s[6:7], v5, v3, 0
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, v6, v1
-; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v4, v0, 0
+; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v4, v0, 0
 ; VI-NEXT:    v_addc_u32_e32 v7, vcc, 0, v2, vcc
-; VI-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v4, v3, 0
+; VI-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v4, v3, 0
 ; VI-NEXT:    v_add_u32_e32 v0, vcc, v6, v0
 ; VI-NEXT:    v_addc_u32_e32 v0, vcc, v7, v1, vcc
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
@@ -2158,15 +2158,15 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; VI-NEXT:    v_add_u32_e32 v6, vcc, v5, v0
 ; VI-NEXT:    v_addc_u32_e32 v7, vcc, v4, v1, vcc
-; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s4, v6, 0
-; VI-NEXT:    v_mul_lo_u32 v4, s4, v7
-; VI-NEXT:    v_mul_lo_u32 v5, s5, v6
+; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], s8, v6, 0
+; VI-NEXT:    v_mul_lo_u32 v4, s8, v7
+; VI-NEXT:    v_mul_lo_u32 v5, s9, v6
 ; VI-NEXT:    v_mul_hi_u32 v8, v6, v0
-; VI-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v7, v0, 0
+; VI-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], v7, v0, 0
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, v4, v1
-; VI-NEXT:    v_add_u32_e32 v1, vcc, v1, v5
-; VI-NEXT:    v_mad_u64_u32 v[4:5], s[0:1], v6, v1, 0
-; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v7, v1, 0
+; VI-NEXT:    v_add_u32_e32 v1, vcc, v5, v1
+; VI-NEXT:    v_mad_u64_u32 v[4:5], s[6:7], v6, v1, 0
+; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], v7, v1, 0
 ; VI-NEXT:    v_add_u32_e32 v4, vcc, v8, v4
 ; VI-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; VI-NEXT:    v_add_u32_e32 v2, vcc, v4, v2
@@ -2176,119 +2176,117 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; VI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; VI-NEXT:    v_add_u32_e32 v2, vcc, v6, v0
 ; VI-NEXT:    v_addc_u32_e32 v3, vcc, v7, v1, vcc
-; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s10, v3, 0
-; VI-NEXT:    v_mul_hi_u32 v4, s10, v2
-; VI-NEXT:    v_readfirstlane_b32 s4, v1
-; VI-NEXT:    v_readfirstlane_b32 s5, v0
-; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s11, v3, 0
-; VI-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], s11, v2, 0
-; VI-NEXT:    v_readfirstlane_b32 s6, v4
-; VI-NEXT:    s_add_u32 s0, s6, s5
-; VI-NEXT:    s_addc_u32 s1, 0, s4
-; VI-NEXT:    v_readfirstlane_b32 s6, v2
-; VI-NEXT:    v_readfirstlane_b32 s5, v3
-; VI-NEXT:    s_add_u32 s0, s0, s6
-; VI-NEXT:    v_readfirstlane_b32 s4, v1
-; VI-NEXT:    s_addc_u32 s0, s1, s5
-; VI-NEXT:    s_addc_u32 s6, s4, 0
-; VI-NEXT:    v_readfirstlane_b32 s1, v0
-; VI-NEXT:    s_add_u32 s7, s0, s1
-; VI-NEXT:    v_mov_b32_e32 v2, s7
-; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], s2, v2, 0
-; VI-NEXT:    s_addc_u32 s6, 0, s6
-; VI-NEXT:    s_mul_i32 s0, s2, s6
-; VI-NEXT:    v_readfirstlane_b32 s1, v1
-; VI-NEXT:    s_add_i32 s0, s1, s0
-; VI-NEXT:    s_mul_i32 s1, s3, s7
-; VI-NEXT:    s_add_i32 s12, s0, s1
-; VI-NEXT:    s_sub_i32 s0, s11, s12
-; VI-NEXT:    v_sub_u32_e32 v0, vcc, s10, v0
-; VI-NEXT:    s_cmp_lg_u64 vcc, 0
-; VI-NEXT:    s_subb_u32 s13, s0, s3
-; VI-NEXT:    v_subrev_u32_e64 v1, s[0:1], s2, v0
-; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; VI-NEXT:    s_subb_u32 s13, s13, 0
-; VI-NEXT:    s_cmp_ge_u32 s13, s3
-; VI-NEXT:    s_cselect_b32 s14, -1, 0
-; VI-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v1
-; VI-NEXT:    s_cmp_eq_u32 s13, s3
-; VI-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v3, s14
-; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; VI-NEXT:    v_cndmask_b32_e64 v1, v3, v1, s[0:1]
-; VI-NEXT:    s_add_u32 s0, s7, 1
-; VI-NEXT:    s_addc_u32 s13, s6, 0
-; VI-NEXT:    s_add_u32 s1, s7, 2
-; VI-NEXT:    s_addc_u32 s7, s6, 0
-; VI-NEXT:    v_mov_b32_e32 v3, s0
-; VI-NEXT:    v_mov_b32_e32 v4, s1
-; VI-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
-; VI-NEXT:    v_cndmask_b32_e64 v3, v3, v4, s[0:1]
-; VI-NEXT:    v_mov_b32_e32 v1, s13
-; VI-NEXT:    v_mov_b32_e32 v4, s7
-; VI-NEXT:    s_cmp_lg_u64 vcc, 0
-; VI-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[0:1]
-; VI-NEXT:    s_subb_u32 s0, s11, s12
-; VI-NEXT:    s_cmp_ge_u32 s0, s3
-; VI-NEXT:    s_cselect_b32 s1, -1, 0
-; VI-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
-; VI-NEXT:    s_cmp_eq_u32 s0, s3
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; VI-NEXT:    v_mov_b32_e32 v4, s1
-; VI-NEXT:    s_cselect_b64 vcc, -1, 0
-; VI-NEXT:    v_cndmask_b32_e32 v0, v4, v0, vcc
-; VI-NEXT:    v_mov_b32_e32 v4, s6
-; VI-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT:    v_cndmask_b32_e32 v1, v4, v1, vcc
-; VI-NEXT:    v_cndmask_b32_e32 v0, v2, v3, vcc
-; VI-NEXT:    s_cbranch_execnz .LBB16_3
+; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], s2, v3, 0
+; VI-NEXT:    v_mul_hi_u32 v4, s2, v2
+; VI-NEXT:    v_readfirstlane_b32 s8, v1
+; VI-NEXT:    v_readfirstlane_b32 s9, v0
+; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], s3, v3, 0
+; VI-NEXT:    v_mad_u64_u32 v[2:3], s[6:7], s3, v2, 0
+; VI-NEXT:    v_readfirstlane_b32 s10, v4
+; VI-NEXT:    s_add_u32 s6, s10, s9
+; VI-NEXT:    s_addc_u32 s7, 0, s8
+; VI-NEXT:    v_readfirstlane_b32 s10, v2
+; VI-NEXT:    v_readfirstlane_b32 s9, v3
+; VI-NEXT:    s_add_u32 s6, s6, s10
+; VI-NEXT:    v_readfirstlane_b32 s8, v1
+; VI-NEXT:    s_addc_u32 s6, s7, s9
+; VI-NEXT:    s_addc_u32 s8, s8, 0
+; VI-NEXT:    v_readfirstlane_b32 s7, v0
+; VI-NEXT:    s_add_u32 s12, s6, s7
+; VI-NEXT:    v_mov_b32_e32 v0, s12
+; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], s4, v0, 0
+; VI-NEXT:    s_addc_u32 s13, 0, s8
+; VI-NEXT:    s_mul_i32 s8, s4, s13
+; VI-NEXT:    v_readfirstlane_b32 s9, v1
+; VI-NEXT:    s_add_i32 s8, s9, s8
+; VI-NEXT:    s_mul_i32 s9, s5, s12
+; VI-NEXT:    s_add_i32 s14, s8, s9
+; VI-NEXT:    s_sub_i32 s10, s3, s14
+; VI-NEXT:    v_readfirstlane_b32 s8, v0
+; VI-NEXT:    s_sub_i32 s15, s2, s8
+; VI-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; VI-NEXT:    s_subb_u32 s16, s10, s5
+; VI-NEXT:    s_sub_i32 s17, s15, s4
+; VI-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; VI-NEXT:    s_subb_u32 s10, s16, 0
+; VI-NEXT:    s_cmp_ge_u32 s10, s5
+; VI-NEXT:    s_cselect_b32 s11, -1, 0
+; VI-NEXT:    s_cmp_ge_u32 s17, s4
+; VI-NEXT:    s_cselect_b32 s16, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s10, s5
+; VI-NEXT:    s_cselect_b32 s10, s16, s11
+; VI-NEXT:    s_add_u32 s11, s12, 1
+; VI-NEXT:    s_addc_u32 s16, s13, 0
+; VI-NEXT:    s_add_u32 s17, s12, 2
+; VI-NEXT:    s_addc_u32 s18, s13, 0
+; VI-NEXT:    s_cmp_lg_u32 s10, 0
+; VI-NEXT:    s_cselect_b32 s10, s17, s11
+; VI-NEXT:    s_cselect_b32 s11, s18, s16
+; VI-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; VI-NEXT:    s_subb_u32 s3, s3, s14
+; VI-NEXT:    s_cmp_ge_u32 s3, s5
+; VI-NEXT:    s_cselect_b32 s8, -1, 0
+; VI-NEXT:    s_cmp_ge_u32 s15, s4
+; VI-NEXT:    s_cselect_b32 s9, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s3, s5
+; VI-NEXT:    s_cselect_b32 s3, s9, s8
+; VI-NEXT:    s_cmp_lg_u32 s3, 0
+; VI-NEXT:    s_cselect_b32 s9, s11, s13
+; VI-NEXT:    s_cselect_b32 s8, s10, s12
+; VI-NEXT:    s_cbranch_execnz .LBB16_4
 ; VI-NEXT:  .LBB16_2:
-; VI-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; VI-NEXT:    s_sub_i32 s0, 0, s2
+; VI-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; VI-NEXT:    s_sub_i32 s3, 0, s4
 ; VI-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; VI-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; VI-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; VI-NEXT:    v_mul_lo_u32 v1, s0, v0
+; VI-NEXT:    v_mul_lo_u32 v1, s3, v0
 ; VI-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; VI-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
-; VI-NEXT:    v_mul_hi_u32 v0, s10, v0
-; VI-NEXT:    v_readfirstlane_b32 s0, v0
-; VI-NEXT:    s_mul_i32 s0, s0, s2
-; VI-NEXT:    s_sub_i32 s0, s10, s0
-; VI-NEXT:    s_sub_i32 s1, s0, s2
+; VI-NEXT:    v_mul_hi_u32 v0, s2, v0
+; VI-NEXT:    v_readfirstlane_b32 s3, v0
+; VI-NEXT:    s_mul_i32 s3, s3, s4
+; VI-NEXT:    s_sub_i32 s2, s2, s3
+; VI-NEXT:    s_sub_i32 s3, s2, s4
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, 1, v0
-; VI-NEXT:    s_cmp_ge_u32 s0, s2
+; VI-NEXT:    s_cmp_ge_u32 s2, s4
 ; VI-NEXT:    s_cselect_b64 vcc, -1, 0
-; VI-NEXT:    s_cselect_b32 s0, s1, s0
+; VI-NEXT:    s_cselect_b32 s2, s3, s2
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; VI-NEXT:    v_add_u32_e32 v1, vcc, 1, v0
-; VI-NEXT:    s_cmp_ge_u32 s0, s2
+; VI-NEXT:    s_cmp_ge_u32 s2, s4
 ; VI-NEXT:    s_cselect_b64 vcc, -1, 0
 ; VI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; VI-NEXT:    v_mov_b32_e32 v1, 0
+; VI-NEXT:    s_branch .LBB16_5
 ; VI-NEXT:  .LBB16_3:
-; VI-NEXT:    v_mov_b32_e32 v2, s8
-; VI-NEXT:    v_mov_b32_e32 v3, s9
+; VI-NEXT:    ; implicit-def: $sgpr8_sgpr9
+; VI-NEXT:    s_branch .LBB16_2
+; VI-NEXT:  .LBB16_4:
+; VI-NEXT:    v_mov_b32_e32 v0, s8
+; VI-NEXT:    v_mov_b32_e32 v1, s9
+; VI-NEXT:  .LBB16_5:
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    v_mov_b32_e32 v3, s1
 ; VI-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
 ; VI-NEXT:    s_endpgm
-; VI-NEXT:  .LBB16_4:
-; VI-NEXT:    ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT:    s_branch .LBB16_2
 ;
 ; GFX9-LABEL: sudiv64:
 ; GFX9:       ; %bb.0:
-; GFX9-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34
+; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_or_b64 s[0:1], s[10:11], s[2:3]
-; GFX9-NEXT:    s_mov_b32 s0, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT:    s_or_b64 s[4:5], s[2:3], s[6:7]
+; GFX9-NEXT:    s_mov_b32 s4, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX9-NEXT:    s_cbranch_scc0 .LBB16_4
 ; GFX9-NEXT:  ; %bb.1:
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX9-NEXT:    s_sub_u32 s0, 0, s2
-; GFX9-NEXT:    s_subb_u32 s1, 0, s3
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GFX9-NEXT:    s_sub_u32 s10, 0, s6
+; GFX9-NEXT:    s_subb_u32 s11, 0, s7
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2297,166 +2295,157 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX9-NEXT:    s_mul_i32 s12, s0, s6
-; GFX9-NEXT:    s_mul_hi_u32 s14, s0, s7
-; GFX9-NEXT:    s_mul_i32 s13, s1, s7
-; GFX9-NEXT:    s_add_i32 s12, s14, s12
-; GFX9-NEXT:    s_add_i32 s12, s12, s13
-; GFX9-NEXT:    s_mul_i32 s15, s0, s7
-; GFX9-NEXT:    s_mul_hi_u32 s13, s7, s12
-; GFX9-NEXT:    s_mul_i32 s14, s7, s12
-; GFX9-NEXT:    s_mul_hi_u32 s7, s7, s15
-; GFX9-NEXT:    s_add_u32 s7, s7, s14
+; GFX9-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX9-NEXT:    s_mul_i32 s9, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s8
+; GFX9-NEXT:    s_mul_i32 s13, s11, s8
+; GFX9-NEXT:    s_add_i32 s9, s14, s9
+; GFX9-NEXT:    s_add_i32 s9, s9, s13
+; GFX9-NEXT:    s_mul_i32 s15, s10, s8
+; GFX9-NEXT:    s_mul_i32 s14, s8, s9
+; GFX9-NEXT:    s_mul_hi_u32 s16, s8, s15
+; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s9
+; GFX9-NEXT:    s_add_u32 s14, s16, s14
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s13
-; GFX9-NEXT:    s_mul_hi_u32 s16, s6, s15
-; GFX9-NEXT:    s_mul_i32 s15, s6, s15
-; GFX9-NEXT:    s_add_u32 s7, s7, s15
-; GFX9-NEXT:    s_mul_hi_u32 s14, s6, s12
-; GFX9-NEXT:    s_addc_u32 s7, s13, s16
-; GFX9-NEXT:    s_addc_u32 s13, s14, 0
-; GFX9-NEXT:    s_mul_i32 s12, s6, s12
-; GFX9-NEXT:    s_add_u32 s7, s7, s12
-; GFX9-NEXT:    s_addc_u32 s12, 0, s13
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s7, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s6, s6, s12
-; GFX9-NEXT:    v_readfirstlane_b32 s12, v0
-; GFX9-NEXT:    s_mul_i32 s7, s0, s6
-; GFX9-NEXT:    s_mul_hi_u32 s13, s0, s12
-; GFX9-NEXT:    s_add_i32 s7, s13, s7
-; GFX9-NEXT:    s_mul_i32 s1, s1, s12
-; GFX9-NEXT:    s_add_i32 s7, s7, s1
-; GFX9-NEXT:    s_mul_i32 s0, s0, s12
-; GFX9-NEXT:    s_mul_hi_u32 s13, s6, s0
-; GFX9-NEXT:    s_mul_i32 s14, s6, s0
-; GFX9-NEXT:    s_mul_i32 s16, s12, s7
-; GFX9-NEXT:    s_mul_hi_u32 s0, s12, s0
-; GFX9-NEXT:    s_mul_hi_u32 s15, s12, s7
-; GFX9-NEXT:    s_add_u32 s0, s0, s16
-; GFX9-NEXT:    s_addc_u32 s12, 0, s15
-; GFX9-NEXT:    s_add_u32 s0, s0, s14
-; GFX9-NEXT:    s_mul_hi_u32 s1, s6, s7
-; GFX9-NEXT:    s_addc_u32 s0, s12, s13
-; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_mul_i32 s7, s6, s7
-; GFX9-NEXT:    s_add_u32 s0, s0, s7
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_addc_u32 s0, s6, s1
-; GFX9-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX9-NEXT:    s_mul_i32 s6, s10, s0
-; GFX9-NEXT:    s_mul_hi_u32 s12, s10, s7
-; GFX9-NEXT:    s_mul_hi_u32 s1, s10, s0
-; GFX9-NEXT:    s_add_u32 s6, s12, s6
-; GFX9-NEXT:    s_addc_u32 s1, 0, s1
-; GFX9-NEXT:    s_mul_hi_u32 s13, s11, s7
-; GFX9-NEXT:    s_mul_i32 s7, s11, s7
-; GFX9-NEXT:    s_add_u32 s6, s6, s7
-; GFX9-NEXT:    s_mul_hi_u32 s12, s11, s0
-; GFX9-NEXT:    s_addc_u32 s1, s1, s13
-; GFX9-NEXT:    s_addc_u32 s6, s12, 0
-; GFX9-NEXT:    s_mul_i32 s0, s11, s0
-; GFX9-NEXT:    s_add_u32 s7, s1, s0
-; GFX9-NEXT:    s_addc_u32 s6, 0, s6
-; GFX9-NEXT:    s_mul_i32 s0, s2, s6
-; GFX9-NEXT:    s_mul_hi_u32 s1, s2, s7
-; GFX9-NEXT:    s_add_i32 s0, s1, s0
-; GFX9-NEXT:    s_mul_i32 s1, s3, s7
-; GFX9-NEXT:    s_add_i32 s12, s0, s1
-; GFX9-NEXT:    s_mul_i32 s1, s2, s7
-; GFX9-NEXT:    v_mov_b32_e32 v0, s1
-; GFX9-NEXT:    s_sub_i32 s0, s11, s12
-; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s10, v0
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    s_subb_u32 s13, s0, s3
-; GFX9-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s2, v0
-; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX9-NEXT:    s_subb_u32 s13, s13, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s13, s3
-; GFX9-NEXT:    s_cselect_b32 s14, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v1
-; GFX9-NEXT:    s_cmp_eq_u32 s13, s3
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s14
-; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v2, v1, s[0:1]
-; GFX9-NEXT:    s_add_u32 s0, s7, 1
-; GFX9-NEXT:    s_addc_u32 s13, s6, 0
-; GFX9-NEXT:    s_add_u32 s1, s7, 2
-; GFX9-NEXT:    s_addc_u32 s14, s6, 0
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v1
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
-; GFX9-NEXT:    v_mov_b32_e32 v3, s14
-; GFX9-NEXT:    s_cmp_lg_u64 vcc, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
-; GFX9-NEXT:    s_subb_u32 s0, s11, s12
-; GFX9-NEXT:    s_cmp_ge_u32 s0, s3
-; GFX9-NEXT:    s_cselect_b32 s1, -1, 0
-; GFX9-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
-; GFX9-NEXT:    s_cmp_eq_u32 s0, s3
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; GFX9-NEXT:    v_mov_b32_e32 v3, s6
-; GFX9-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s7
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s15
+; GFX9-NEXT:    s_mul_i32 s15, s12, s15
+; GFX9-NEXT:    s_add_u32 s14, s14, s15
+; GFX9-NEXT:    s_mul_hi_u32 s16, s12, s9
+; GFX9-NEXT:    s_addc_u32 s13, s13, s17
+; GFX9-NEXT:    s_addc_u32 s14, s16, 0
+; GFX9-NEXT:    s_mul_i32 s9, s12, s9
+; GFX9-NEXT:    s_add_u32 s9, s13, s9
+; GFX9-NEXT:    s_addc_u32 s13, 0, s14
+; GFX9-NEXT:    s_add_i32 s14, s8, s9
+; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_addc_u32 s12, s12, s13
+; GFX9-NEXT:    s_mul_i32 s8, s10, s12
+; GFX9-NEXT:    s_mul_hi_u32 s9, s10, s14
+; GFX9-NEXT:    s_add_i32 s8, s9, s8
+; GFX9-NEXT:    s_mul_i32 s11, s11, s14
+; GFX9-NEXT:    s_add_i32 s8, s8, s11
+; GFX9-NEXT:    s_mul_i32 s10, s10, s14
+; GFX9-NEXT:    s_mul_hi_u32 s11, s12, s10
+; GFX9-NEXT:    s_mul_i32 s13, s12, s10
+; GFX9-NEXT:    s_mul_i32 s16, s14, s8
+; GFX9-NEXT:    s_mul_hi_u32 s10, s14, s10
+; GFX9-NEXT:    s_mul_hi_u32 s15, s14, s8
+; GFX9-NEXT:    s_add_u32 s10, s10, s16
+; GFX9-NEXT:    s_addc_u32 s15, 0, s15
+; GFX9-NEXT:    s_add_u32 s10, s10, s13
+; GFX9-NEXT:    s_mul_hi_u32 s9, s12, s8
+; GFX9-NEXT:    s_addc_u32 s10, s15, s11
+; GFX9-NEXT:    s_addc_u32 s9, s9, 0
+; GFX9-NEXT:    s_mul_i32 s8, s12, s8
+; GFX9-NEXT:    s_add_u32 s8, s10, s8
+; GFX9-NEXT:    s_addc_u32 s10, 0, s9
+; GFX9-NEXT:    s_add_i32 s14, s14, s8
+; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_addc_u32 s8, s12, s10
+; GFX9-NEXT:    s_mul_i32 s10, s2, s8
+; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s14
+; GFX9-NEXT:    s_mul_hi_u32 s9, s2, s8
+; GFX9-NEXT:    s_add_u32 s10, s11, s10
+; GFX9-NEXT:    s_addc_u32 s9, 0, s9
+; GFX9-NEXT:    s_mul_i32 s13, s3, s14
+; GFX9-NEXT:    s_mul_hi_u32 s12, s3, s14
+; GFX9-NEXT:    s_add_u32 s10, s10, s13
+; GFX9-NEXT:    s_mul_hi_u32 s11, s3, s8
+; GFX9-NEXT:    s_addc_u32 s9, s9, s12
+; GFX9-NEXT:    s_addc_u32 s10, s11, 0
+; GFX9-NEXT:    s_mul_i32 s8, s3, s8
+; GFX9-NEXT:    s_add_u32 s12, s9, s8
+; GFX9-NEXT:    s_addc_u32 s13, 0, s10
+; GFX9-NEXT:    s_mul_i32 s8, s6, s13
+; GFX9-NEXT:    s_mul_hi_u32 s9, s6, s12
+; GFX9-NEXT:    s_add_i32 s8, s9, s8
+; GFX9-NEXT:    s_mul_i32 s9, s7, s12
+; GFX9-NEXT:    s_add_i32 s14, s8, s9
+; GFX9-NEXT:    s_sub_i32 s10, s3, s14
+; GFX9-NEXT:    s_mul_i32 s8, s6, s12
+; GFX9-NEXT:    s_sub_i32 s15, s2, s8
+; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_subb_u32 s16, s10, s7
+; GFX9-NEXT:    s_sub_i32 s17, s15, s6
+; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GFX9-NEXT:    s_subb_u32 s10, s16, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s10, s7
+; GFX9-NEXT:    s_cselect_b32 s11, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s17, s6
+; GFX9-NEXT:    s_cselect_b32 s16, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s10, s7
+; GFX9-NEXT:    s_cselect_b32 s10, s16, s11
+; GFX9-NEXT:    s_add_u32 s11, s12, 1
+; GFX9-NEXT:    s_addc_u32 s16, s13, 0
+; GFX9-NEXT:    s_add_u32 s17, s12, 2
+; GFX9-NEXT:    s_addc_u32 s18, s13, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s10, 0
+; GFX9-NEXT:    s_cselect_b32 s10, s17, s11
+; GFX9-NEXT:    s_cselect_b32 s11, s18, s16
+; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX9-NEXT:    s_subb_u32 s3, s3, s14
+; GFX9-NEXT:    s_cmp_ge_u32 s3, s7
+; GFX9-NEXT:    s_cselect_b32 s8, -1, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s15, s6
+; GFX9-NEXT:    s_cselect_b32 s9, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s3, s7
+; GFX9-NEXT:    s_cselect_b32 s3, s9, s8
+; GFX9-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX9-NEXT:    s_cselect_b32 s9, s11, s13
+; GFX9-NEXT:    s_cselect_b32 s8, s10, s12
 ; GFX9-NEXT:    s_cbranch_execnz .LBB16_3
 ; GFX9-NEXT:  .LBB16_2:
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX9-NEXT:    s_sub_i32 s0, 0, s2
-; GFX9-NEXT:    s_mov_b32 s1, 0
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX9-NEXT:    s_sub_i32 s3, 0, s6
+; GFX9-NEXT:    s_mov_b32 s9, 0
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s3, v0
-; GFX9-NEXT:    s_mul_i32 s0, s0, s3
-; GFX9-NEXT:    s_mul_hi_u32 s0, s3, s0
-; GFX9-NEXT:    s_add_i32 s3, s3, s0
-; GFX9-NEXT:    s_mul_hi_u32 s0, s10, s3
-; GFX9-NEXT:    s_mul_i32 s4, s0, s2
-; GFX9-NEXT:    s_sub_i32 s4, s10, s4
-; GFX9-NEXT:    s_add_i32 s3, s0, 1
-; GFX9-NEXT:    s_sub_i32 s5, s4, s2
-; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
-; GFX9-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX9-NEXT:    s_cselect_b32 s4, s5, s4
-; GFX9-NEXT:    s_add_i32 s3, s0, 1
-; GFX9-NEXT:    s_cmp_ge_u32 s4, s2
-; GFX9-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX9-NEXT:    v_mov_b32_e32 v0, s0
-; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX9-NEXT:    s_mul_i32 s3, s3, s4
+; GFX9-NEXT:    s_mul_hi_u32 s3, s4, s3
+; GFX9-NEXT:    s_add_i32 s4, s4, s3
+; GFX9-NEXT:    s_mul_hi_u32 s3, s2, s4
+; GFX9-NEXT:    s_mul_i32 s5, s3, s6
+; GFX9-NEXT:    s_sub_i32 s2, s2, s5
+; GFX9-NEXT:    s_add_i32 s4, s3, 1
+; GFX9-NEXT:    s_sub_i32 s5, s2, s6
+; GFX9-NEXT:    s_cmp_ge_u32 s2, s6
+; GFX9-NEXT:    s_cselect_b32 s3, s4, s3
+; GFX9-NEXT:    s_cselect_b32 s2, s5, s2
+; GFX9-NEXT:    s_add_i32 s4, s3, 1
+; GFX9-NEXT:    s_cmp_ge_u32 s2, s6
+; GFX9-NEXT:    s_cselect_b32 s8, s4, s3
 ; GFX9-NEXT:  .LBB16_3:
+; GFX9-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    v_mov_b32_e32 v1, s9
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    s_endpgm
 ; GFX9-NEXT:  .LBB16_4:
-; GFX9-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX9-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; GFX9-NEXT:    s_branch .LBB16_2
 ;
 ; GFX1010-LABEL: sudiv64:
 ; GFX1010:       ; %bb.0:
 ; GFX1010-NEXT:    s_clause 0x1
-; GFX1010-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX1010-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34
+; GFX1010-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1010-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1010-NEXT:    s_or_b64 s[4:5], s[10:11], s[2:3]
+; GFX1010-NEXT:    s_or_b64 s[4:5], s[2:3], s[6:7]
 ; GFX1010-NEXT:    s_mov_b32 s4, 0
 ; GFX1010-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX1010-NEXT:    s_cbranch_scc0 .LBB16_4
 ; GFX1010-NEXT:  ; %bb.1:
-; GFX1010-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX1010-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX1010-NEXT:    s_sub_u32 s5, 0, s2
-; GFX1010-NEXT:    s_subb_u32 s6, 0, s3
+; GFX1010-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX1010-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GFX1010-NEXT:    s_sub_u32 s9, 0, s6
+; GFX1010-NEXT:    s_subb_u32 s10, 0, s7
 ; GFX1010-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX1010-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX1010-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2465,160 +2454,158 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1010-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GFX1010-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX1010-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1010-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX1010-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1010-NEXT:    s_mul_i32 s7, s5, s0
-; GFX1010-NEXT:    s_mul_hi_u32 s13, s5, s1
-; GFX1010-NEXT:    s_mul_i32 s12, s6, s1
-; GFX1010-NEXT:    s_add_i32 s7, s13, s7
-; GFX1010-NEXT:    s_mul_i32 s14, s5, s1
-; GFX1010-NEXT:    s_add_i32 s7, s7, s12
-; GFX1010-NEXT:    s_mul_hi_u32 s13, s1, s14
-; GFX1010-NEXT:    s_mul_hi_u32 s15, s0, s14
-; GFX1010-NEXT:    s_mul_i32 s12, s0, s14
-; GFX1010-NEXT:    s_mul_hi_u32 s14, s1, s7
-; GFX1010-NEXT:    s_mul_i32 s1, s1, s7
-; GFX1010-NEXT:    s_mul_hi_u32 s16, s0, s7
-; GFX1010-NEXT:    s_add_u32 s1, s13, s1
-; GFX1010-NEXT:    s_addc_u32 s13, 0, s14
-; GFX1010-NEXT:    s_add_u32 s1, s1, s12
-; GFX1010-NEXT:    s_mul_i32 s7, s0, s7
-; GFX1010-NEXT:    s_addc_u32 s1, s13, s15
-; GFX1010-NEXT:    s_addc_u32 s12, s16, 0
-; GFX1010-NEXT:    s_add_u32 s1, s1, s7
-; GFX1010-NEXT:    s_addc_u32 s7, 0, s12
-; GFX1010-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1010-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1010-NEXT:    s_addc_u32 s0, s0, s7
-; GFX1010-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1010-NEXT:    s_mul_i32 s7, s5, s0
-; GFX1010-NEXT:    s_mul_hi_u32 s12, s5, s1
-; GFX1010-NEXT:    s_mul_i32 s6, s6, s1
-; GFX1010-NEXT:    s_add_i32 s7, s12, s7
-; GFX1010-NEXT:    s_mul_i32 s5, s5, s1
-; GFX1010-NEXT:    s_add_i32 s7, s7, s6
-; GFX1010-NEXT:    s_mul_hi_u32 s12, s0, s5
-; GFX1010-NEXT:    s_mul_i32 s13, s0, s5
-; GFX1010-NEXT:    s_mul_hi_u32 s5, s1, s5
-; GFX1010-NEXT:    s_mul_hi_u32 s14, s1, s7
-; GFX1010-NEXT:    s_mul_i32 s1, s1, s7
-; GFX1010-NEXT:    s_mul_hi_u32 s6, s0, s7
-; GFX1010-NEXT:    s_add_u32 s1, s5, s1
-; GFX1010-NEXT:    s_addc_u32 s5, 0, s14
-; GFX1010-NEXT:    s_add_u32 s1, s1, s13
-; GFX1010-NEXT:    s_mul_i32 s7, s0, s7
-; GFX1010-NEXT:    s_addc_u32 s1, s5, s12
-; GFX1010-NEXT:    s_addc_u32 s5, s6, 0
-; GFX1010-NEXT:    s_add_u32 s1, s1, s7
-; GFX1010-NEXT:    s_addc_u32 s5, 0, s5
-; GFX1010-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1010-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1010-NEXT:    s_addc_u32 s0, s0, s5
-; GFX1010-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1010-NEXT:    s_mul_i32 s6, s10, s0
-; GFX1010-NEXT:    s_mul_hi_u32 s5, s10, s0
-; GFX1010-NEXT:    s_mul_hi_u32 s7, s11, s0
-; GFX1010-NEXT:    s_mul_i32 s0, s11, s0
-; GFX1010-NEXT:    s_mul_hi_u32 s12, s10, s1
-; GFX1010-NEXT:    s_mul_hi_u32 s13, s11, s1
-; GFX1010-NEXT:    s_mul_i32 s1, s11, s1
-; GFX1010-NEXT:    s_add_u32 s6, s12, s6
-; GFX1010-NEXT:    s_addc_u32 s5, 0, s5
-; GFX1010-NEXT:    s_add_u32 s1, s6, s1
-; GFX1010-NEXT:    s_addc_u32 s1, s5, s13
-; GFX1010-NEXT:    s_addc_u32 s5, s7, 0
-; GFX1010-NEXT:    s_add_u32 s1, s1, s0
-; GFX1010-NEXT:    s_addc_u32 s5, 0, s5
-; GFX1010-NEXT:    s_mul_hi_u32 s0, s2, s1
-; GFX1010-NEXT:    s_mul_i32 s7, s2, s5
-; GFX1010-NEXT:    s_mul_i32 s12, s2, s1
-; GFX1010-NEXT:    s_add_i32 s0, s0, s7
-; GFX1010-NEXT:    v_sub_co_u32 v0, s7, s10, s12
-; GFX1010-NEXT:    s_mul_i32 s6, s3, s1
-; GFX1010-NEXT:    s_add_i32 s0, s0, s6
-; GFX1010-NEXT:    v_sub_co_u32 v1, s12, v0, s2
-; GFX1010-NEXT:    s_sub_i32 s6, s11, s0
-; GFX1010-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX1010-NEXT:    s_subb_u32 s6, s6, s3
+; GFX1010-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1010-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX1010-NEXT:    s_mul_i32 s11, s9, s5
+; GFX1010-NEXT:    s_mul_hi_u32 s13, s9, s8
+; GFX1010-NEXT:    s_mul_i32 s12, s10, s8
+; GFX1010-NEXT:    s_add_i32 s11, s13, s11
+; GFX1010-NEXT:    s_mul_i32 s14, s9, s8
+; GFX1010-NEXT:    s_add_i32 s11, s11, s12
+; GFX1010-NEXT:    s_mul_hi_u32 s13, s8, s14
+; GFX1010-NEXT:    s_mul_i32 s16, s8, s11
+; GFX1010-NEXT:    s_mul_hi_u32 s15, s5, s14
+; GFX1010-NEXT:    s_mul_i32 s12, s5, s14
+; GFX1010-NEXT:    s_mul_hi_u32 s14, s8, s11
+; GFX1010-NEXT:    s_add_u32 s13, s13, s16
+; GFX1010-NEXT:    s_addc_u32 s14, 0, s14
+; GFX1010-NEXT:    s_mul_hi_u32 s17, s5, s11
+; GFX1010-NEXT:    s_add_u32 s12, s13, s12
+; GFX1010-NEXT:    s_mul_i32 s11, s5, s11
+; GFX1010-NEXT:    s_addc_u32 s12, s14, s15
+; GFX1010-NEXT:    s_addc_u32 s13, s17, 0
+; GFX1010-NEXT:    s_add_u32 s11, s12, s11
+; GFX1010-NEXT:    s_addc_u32 s12, 0, s13
+; GFX1010-NEXT:    s_add_i32 s8, s8, s11
+; GFX1010-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX1010-NEXT:    s_mul_hi_u32 s13, s9, s8
+; GFX1010-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX1010-NEXT:    s_mul_i32 s11, s9, s8
+; GFX1010-NEXT:    s_addc_u32 s5, s5, s12
+; GFX1010-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1010-NEXT:    s_mul_i32 s9, s9, s5
+; GFX1010-NEXT:    s_mul_hi_u32 s12, s8, s11
+; GFX1010-NEXT:    s_add_i32 s9, s13, s9
+; GFX1010-NEXT:    s_mul_hi_u32 s13, s5, s11
+; GFX1010-NEXT:    s_add_i32 s9, s9, s10
+; GFX1010-NEXT:    s_mul_i32 s10, s5, s11
+; GFX1010-NEXT:    s_mul_i32 s15, s8, s9
+; GFX1010-NEXT:    s_mul_hi_u32 s14, s8, s9
+; GFX1010-NEXT:    s_add_u32 s12, s12, s15
+; GFX1010-NEXT:    s_addc_u32 s14, 0, s14
+; GFX1010-NEXT:    s_mul_hi_u32 s11, s5, s9
+; GFX1010-NEXT:    s_add_u32 s10, s12, s10
+; GFX1010-NEXT:    s_mul_i32 s9, s5, s9
+; GFX1010-NEXT:    s_addc_u32 s10, s14, s13
+; GFX1010-NEXT:    s_addc_u32 s11, s11, 0
+; GFX1010-NEXT:    s_add_u32 s9, s10, s9
+; GFX1010-NEXT:    s_addc_u32 s10, 0, s11
+; GFX1010-NEXT:    s_add_i32 s8, s8, s9
+; GFX1010-NEXT:    s_cselect_b32 s9, 1, 0
+; GFX1010-NEXT:    s_mul_hi_u32 s11, s2, s8
+; GFX1010-NEXT:    s_cmp_lg_u32 s9, 0
+; GFX1010-NEXT:    s_mul_hi_u32 s9, s3, s8
+; GFX1010-NEXT:    s_addc_u32 s5, s5, s10
+; GFX1010-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1010-NEXT:    s_mul_i32 s12, s2, s5
+; GFX1010-NEXT:    s_mul_hi_u32 s10, s2, s5
+; GFX1010-NEXT:    s_add_u32 s11, s11, s12
+; GFX1010-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1010-NEXT:    s_mul_hi_u32 s13, s3, s5
+; GFX1010-NEXT:    s_add_u32 s8, s11, s8
+; GFX1010-NEXT:    s_mul_i32 s5, s3, s5
+; GFX1010-NEXT:    s_addc_u32 s8, s10, s9
+; GFX1010-NEXT:    s_addc_u32 s9, s13, 0
+; GFX1010-NEXT:    s_add_u32 s5, s8, s5
+; GFX1010-NEXT:    s_addc_u32 s8, 0, s9
+; GFX1010-NEXT:    s_mul_hi_u32 s9, s6, s5
+; GFX1010-NEXT:    s_mul_i32 s10, s6, s8
+; GFX1010-NEXT:    s_mul_i32 s11, s7, s5
+; GFX1010-NEXT:    s_add_i32 s9, s9, s10
+; GFX1010-NEXT:    s_mul_i32 s10, s6, s5
+; GFX1010-NEXT:    s_add_i32 s9, s9, s11
+; GFX1010-NEXT:    s_sub_i32 s11, s3, s9
+; GFX1010-NEXT:    s_sub_i32 s10, s2, s10
+; GFX1010-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX1010-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX1010-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v1
-; GFX1010-NEXT:    s_subb_u32 s6, s6, 0
-; GFX1010-NEXT:    s_cmp_ge_u32 s6, s3
-; GFX1010-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX1010-NEXT:    s_cselect_b32 s12, -1, 0
-; GFX1010-NEXT:    s_cmp_eq_u32 s6, s3
-; GFX1010-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX1010-NEXT:    s_add_u32 s6, s1, 1
-; GFX1010-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1010-NEXT:    s_addc_u32 s12, s5, 0
-; GFX1010-NEXT:    s_add_u32 s13, s1, 2
-; GFX1010-NEXT:    s_addc_u32 s14, s5, 0
-; GFX1010-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX1010-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v0
-; GFX1010-NEXT:    s_subb_u32 s0, s11, s0
-; GFX1010-NEXT:    v_mov_b32_e32 v2, s13
-; GFX1010-NEXT:    s_cmp_ge_u32 s0, s3
-; GFX1010-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1010-NEXT:    s_cselect_b32 s7, -1, 0
-; GFX1010-NEXT:    s_cmp_eq_u32 s0, s3
-; GFX1010-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX1010-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX1010-NEXT:    v_mov_b32_e32 v1, s14
-; GFX1010-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX1010-NEXT:    v_cndmask_b32_e32 v2, s6, v2, vcc_lo
-; GFX1010-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1010-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1010-NEXT:    v_cndmask_b32_e32 v1, s5, v1, vcc_lo
-; GFX1010-NEXT:    v_cndmask_b32_e32 v0, s1, v2, vcc_lo
+; GFX1010-NEXT:    s_subb_u32 s11, s11, s7
+; GFX1010-NEXT:    s_sub_i32 s13, s10, s6
+; GFX1010-NEXT:    s_cselect_b32 s14, 1, 0
+; GFX1010-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX1010-NEXT:    s_subb_u32 s11, s11, 0
+; GFX1010-NEXT:    s_cmp_ge_u32 s11, s7
+; GFX1010-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX1010-NEXT:    s_cmp_ge_u32 s13, s6
+; GFX1010-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX1010-NEXT:    s_cmp_eq_u32 s11, s7
+; GFX1010-NEXT:    s_cselect_b32 s11, s13, s14
+; GFX1010-NEXT:    s_add_u32 s13, s5, 1
+; GFX1010-NEXT:    s_addc_u32 s14, s8, 0
+; GFX1010-NEXT:    s_add_u32 s15, s5, 2
+; GFX1010-NEXT:    s_addc_u32 s16, s8, 0
+; GFX1010-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX1010-NEXT:    s_cselect_b32 s11, s15, s13
+; GFX1010-NEXT:    s_cselect_b32 s13, s16, s14
+; GFX1010-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX1010-NEXT:    s_subb_u32 s3, s3, s9
+; GFX1010-NEXT:    s_cmp_ge_u32 s3, s7
+; GFX1010-NEXT:    s_cselect_b32 s9, -1, 0
+; GFX1010-NEXT:    s_cmp_ge_u32 s10, s6
+; GFX1010-NEXT:    s_cselect_b32 s10, -1, 0
+; GFX1010-NEXT:    s_cmp_eq_u32 s3, s7
+; GFX1010-NEXT:    s_cselect_b32 s3, s10, s9
+; GFX1010-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX1010-NEXT:    s_cselect_b32 s9, s13, s8
+; GFX1010-NEXT:    s_cselect_b32 s8, s11, s5
 ; GFX1010-NEXT:    s_andn2_b32 vcc_lo, exec_lo, s4
 ; GFX1010-NEXT:    s_cbranch_vccnz .LBB16_3
 ; GFX1010-NEXT:  .LBB16_2:
-; GFX1010-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX1010-NEXT:    s_sub_i32 s1, 0, s2
+; GFX1010-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX1010-NEXT:    s_sub_i32 s4, 0, s6
+; GFX1010-NEXT:    s_mov_b32 s9, 0
 ; GFX1010-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX1010-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX1010-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1010-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1010-NEXT:    s_mul_i32 s1, s1, s0
-; GFX1010-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1010-NEXT:    s_add_i32 s0, s0, s1
-; GFX1010-NEXT:    s_mul_hi_u32 s0, s10, s0
-; GFX1010-NEXT:    s_mul_i32 s1, s0, s2
-; GFX1010-NEXT:    s_add_i32 s3, s0, 1
-; GFX1010-NEXT:    s_sub_i32 s1, s10, s1
-; GFX1010-NEXT:    s_sub_i32 s4, s1, s2
-; GFX1010-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1010-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1010-NEXT:    s_cselect_b32 s1, s4, s1
-; GFX1010-NEXT:    s_add_i32 s3, s0, 1
-; GFX1010-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1010-NEXT:    s_mov_b32 s1, 0
-; GFX1010-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1010-NEXT:    v_mov_b32_e32 v0, s0
-; GFX1010-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1010-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1010-NEXT:    s_mul_i32 s4, s4, s3
+; GFX1010-NEXT:    s_mul_hi_u32 s4, s3, s4
+; GFX1010-NEXT:    s_add_i32 s3, s3, s4
+; GFX1010-NEXT:    s_mul_hi_u32 s3, s2, s3
+; GFX1010-NEXT:    s_mul_i32 s4, s3, s6
+; GFX1010-NEXT:    s_sub_i32 s2, s2, s4
+; GFX1010-NEXT:    s_add_i32 s4, s3, 1
+; GFX1010-NEXT:    s_sub_i32 s5, s2, s6
+; GFX1010-NEXT:    s_cmp_ge_u32 s2, s6
+; GFX1010-NEXT:    s_cselect_b32 s3, s4, s3
+; GFX1010-NEXT:    s_cselect_b32 s2, s5, s2
+; GFX1010-NEXT:    s_add_i32 s4, s3, 1
+; GFX1010-NEXT:    s_cmp_ge_u32 s2, s6
+; GFX1010-NEXT:    s_cselect_b32 s8, s4, s3
 ; GFX1010-NEXT:  .LBB16_3:
+; GFX1010-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX1010-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX1010-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    s_endpgm
 ; GFX1010-NEXT:  .LBB16_4:
-; GFX1010-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX1010-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; GFX1010-NEXT:    s_branch .LBB16_2
 ;
 ; GFX1030W32-LABEL: sudiv64:
 ; GFX1030W32:       ; %bb.0:
 ; GFX1030W32-NEXT:    s_clause 0x1
-; GFX1030W32-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX1030W32-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34
+; GFX1030W32-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1030W32-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT:    s_or_b64 s[4:5], s[10:11], s[2:3]
-; GFX1030W32-NEXT:    s_mov_b32 s4, 0
-; GFX1030W32-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1030W32-NEXT:    s_or_b64 s[6:7], s[2:3], s[4:5]
+; GFX1030W32-NEXT:    s_mov_b32 s6, 0
+; GFX1030W32-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; GFX1030W32-NEXT:    s_cbranch_scc0 .LBB16_4
 ; GFX1030W32-NEXT:  ; %bb.1:
-; GFX1030W32-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX1030W32-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX1030W32-NEXT:    s_sub_u32 s5, 0, s2
-; GFX1030W32-NEXT:    s_subb_u32 s6, 0, s3
+; GFX1030W32-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX1030W32-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GFX1030W32-NEXT:    s_sub_u32 s9, 0, s4
+; GFX1030W32-NEXT:    s_subb_u32 s10, 0, s5
 ; GFX1030W32-NEXT:    v_fmamk_f32 v0, v1, 0x4f800000, v0
 ; GFX1030W32-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX1030W32-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2627,160 +2614,158 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1030W32-NEXT:    v_fmamk_f32 v0, v1, 0xcf800000, v0
 ; GFX1030W32-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX1030W32-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1030W32-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX1030W32-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1030W32-NEXT:    s_mul_i32 s7, s5, s0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s5, s1
-; GFX1030W32-NEXT:    s_mul_i32 s12, s6, s1
-; GFX1030W32-NEXT:    s_add_i32 s7, s13, s7
-; GFX1030W32-NEXT:    s_mul_i32 s14, s5, s1
-; GFX1030W32-NEXT:    s_add_i32 s7, s7, s12
-; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s1, s14
-; GFX1030W32-NEXT:    s_mul_hi_u32 s15, s0, s14
-; GFX1030W32-NEXT:    s_mul_i32 s12, s0, s14
-; GFX1030W32-NEXT:    s_mul_hi_u32 s14, s1, s7
-; GFX1030W32-NEXT:    s_mul_i32 s1, s1, s7
-; GFX1030W32-NEXT:    s_mul_hi_u32 s16, s0, s7
-; GFX1030W32-NEXT:    s_add_u32 s1, s13, s1
-; GFX1030W32-NEXT:    s_addc_u32 s13, 0, s14
-; GFX1030W32-NEXT:    s_add_u32 s1, s1, s12
-; GFX1030W32-NEXT:    s_mul_i32 s7, s0, s7
-; GFX1030W32-NEXT:    s_addc_u32 s1, s13, s15
-; GFX1030W32-NEXT:    s_addc_u32 s12, s16, 0
-; GFX1030W32-NEXT:    s_add_u32 s1, s1, s7
-; GFX1030W32-NEXT:    s_addc_u32 s7, 0, s12
-; GFX1030W32-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1030W32-NEXT:    s_addc_u32 s0, s0, s7
-; GFX1030W32-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1030W32-NEXT:    s_mul_i32 s7, s5, s0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s12, s5, s1
-; GFX1030W32-NEXT:    s_mul_i32 s6, s6, s1
-; GFX1030W32-NEXT:    s_add_i32 s7, s12, s7
-; GFX1030W32-NEXT:    s_mul_i32 s5, s5, s1
-; GFX1030W32-NEXT:    s_add_i32 s7, s7, s6
-; GFX1030W32-NEXT:    s_mul_hi_u32 s12, s0, s5
-; GFX1030W32-NEXT:    s_mul_i32 s13, s0, s5
-; GFX1030W32-NEXT:    s_mul_hi_u32 s5, s1, s5
-; GFX1030W32-NEXT:    s_mul_hi_u32 s14, s1, s7
-; GFX1030W32-NEXT:    s_mul_i32 s1, s1, s7
-; GFX1030W32-NEXT:    s_mul_hi_u32 s6, s0, s7
-; GFX1030W32-NEXT:    s_add_u32 s1, s5, s1
-; GFX1030W32-NEXT:    s_addc_u32 s5, 0, s14
-; GFX1030W32-NEXT:    s_add_u32 s1, s1, s13
-; GFX1030W32-NEXT:    s_mul_i32 s7, s0, s7
-; GFX1030W32-NEXT:    s_addc_u32 s1, s5, s12
-; GFX1030W32-NEXT:    s_addc_u32 s5, s6, 0
-; GFX1030W32-NEXT:    s_add_u32 s1, s1, s7
-; GFX1030W32-NEXT:    s_addc_u32 s5, 0, s5
-; GFX1030W32-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1030W32-NEXT:    s_addc_u32 s0, s0, s5
-; GFX1030W32-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1030W32-NEXT:    s_mul_i32 s6, s10, s0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s5, s10, s0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s7, s11, s0
-; GFX1030W32-NEXT:    s_mul_i32 s0, s11, s0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s12, s10, s1
-; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s11, s1
-; GFX1030W32-NEXT:    s_mul_i32 s1, s11, s1
-; GFX1030W32-NEXT:    s_add_u32 s6, s12, s6
-; GFX1030W32-NEXT:    s_addc_u32 s5, 0, s5
-; GFX1030W32-NEXT:    s_add_u32 s1, s6, s1
-; GFX1030W32-NEXT:    s_addc_u32 s1, s5, s13
-; GFX1030W32-NEXT:    s_addc_u32 s5, s7, 0
-; GFX1030W32-NEXT:    s_add_u32 s1, s1, s0
-; GFX1030W32-NEXT:    s_addc_u32 s5, 0, s5
-; GFX1030W32-NEXT:    s_mul_hi_u32 s0, s2, s1
-; GFX1030W32-NEXT:    s_mul_i32 s7, s2, s5
-; GFX1030W32-NEXT:    s_mul_i32 s12, s2, s1
-; GFX1030W32-NEXT:    s_add_i32 s0, s0, s7
-; GFX1030W32-NEXT:    v_sub_co_u32 v0, s7, s10, s12
-; GFX1030W32-NEXT:    s_mul_i32 s6, s3, s1
-; GFX1030W32-NEXT:    s_add_i32 s0, s0, s6
-; GFX1030W32-NEXT:    v_sub_co_u32 v1, s12, v0, s2
-; GFX1030W32-NEXT:    s_sub_i32 s6, s11, s0
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX1030W32-NEXT:    s_subb_u32 s6, s6, s3
+; GFX1030W32-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX1030W32-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX1030W32-NEXT:    s_mul_i32 s11, s9, s7
+; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s9, s8
+; GFX1030W32-NEXT:    s_mul_i32 s12, s10, s8
+; GFX1030W32-NEXT:    s_add_i32 s11, s13, s11
+; GFX1030W32-NEXT:    s_mul_i32 s14, s9, s8
+; GFX1030W32-NEXT:    s_add_i32 s11, s11, s12
+; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s8, s14
+; GFX1030W32-NEXT:    s_mul_i32 s16, s8, s11
+; GFX1030W32-NEXT:    s_mul_hi_u32 s15, s7, s14
+; GFX1030W32-NEXT:    s_mul_i32 s12, s7, s14
+; GFX1030W32-NEXT:    s_mul_hi_u32 s14, s8, s11
+; GFX1030W32-NEXT:    s_add_u32 s13, s13, s16
+; GFX1030W32-NEXT:    s_addc_u32 s14, 0, s14
+; GFX1030W32-NEXT:    s_mul_hi_u32 s17, s7, s11
+; GFX1030W32-NEXT:    s_add_u32 s12, s13, s12
+; GFX1030W32-NEXT:    s_mul_i32 s11, s7, s11
+; GFX1030W32-NEXT:    s_addc_u32 s12, s14, s15
+; GFX1030W32-NEXT:    s_addc_u32 s13, s17, 0
+; GFX1030W32-NEXT:    s_add_u32 s11, s12, s11
+; GFX1030W32-NEXT:    s_addc_u32 s12, 0, s13
+; GFX1030W32-NEXT:    s_add_i32 s8, s8, s11
+; GFX1030W32-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s9, s8
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX1030W32-NEXT:    s_mul_i32 s11, s9, s8
+; GFX1030W32-NEXT:    s_addc_u32 s7, s7, s12
+; GFX1030W32-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1030W32-NEXT:    s_mul_i32 s9, s9, s7
+; GFX1030W32-NEXT:    s_mul_hi_u32 s12, s8, s11
+; GFX1030W32-NEXT:    s_add_i32 s9, s13, s9
+; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s7, s11
+; GFX1030W32-NEXT:    s_add_i32 s9, s9, s10
+; GFX1030W32-NEXT:    s_mul_i32 s10, s7, s11
+; GFX1030W32-NEXT:    s_mul_i32 s15, s8, s9
+; GFX1030W32-NEXT:    s_mul_hi_u32 s14, s8, s9
+; GFX1030W32-NEXT:    s_add_u32 s12, s12, s15
+; GFX1030W32-NEXT:    s_addc_u32 s14, 0, s14
+; GFX1030W32-NEXT:    s_mul_hi_u32 s11, s7, s9
+; GFX1030W32-NEXT:    s_add_u32 s10, s12, s10
+; GFX1030W32-NEXT:    s_mul_i32 s9, s7, s9
+; GFX1030W32-NEXT:    s_addc_u32 s10, s14, s13
+; GFX1030W32-NEXT:    s_addc_u32 s11, s11, 0
+; GFX1030W32-NEXT:    s_add_u32 s9, s10, s9
+; GFX1030W32-NEXT:    s_addc_u32 s10, 0, s11
+; GFX1030W32-NEXT:    s_add_i32 s8, s8, s9
+; GFX1030W32-NEXT:    s_cselect_b32 s9, 1, 0
+; GFX1030W32-NEXT:    s_mul_hi_u32 s11, s2, s8
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s9, 0
+; GFX1030W32-NEXT:    s_mul_hi_u32 s9, s3, s8
+; GFX1030W32-NEXT:    s_addc_u32 s7, s7, s10
+; GFX1030W32-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1030W32-NEXT:    s_mul_i32 s12, s2, s7
+; GFX1030W32-NEXT:    s_mul_hi_u32 s10, s2, s7
+; GFX1030W32-NEXT:    s_add_u32 s11, s11, s12
+; GFX1030W32-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s3, s7
+; GFX1030W32-NEXT:    s_add_u32 s8, s11, s8
+; GFX1030W32-NEXT:    s_mul_i32 s7, s3, s7
+; GFX1030W32-NEXT:    s_addc_u32 s8, s10, s9
+; GFX1030W32-NEXT:    s_addc_u32 s9, s13, 0
+; GFX1030W32-NEXT:    s_add_u32 s7, s8, s7
+; GFX1030W32-NEXT:    s_addc_u32 s8, 0, s9
+; GFX1030W32-NEXT:    s_mul_hi_u32 s9, s4, s7
+; GFX1030W32-NEXT:    s_mul_i32 s10, s4, s8
+; GFX1030W32-NEXT:    s_mul_i32 s11, s5, s7
+; GFX1030W32-NEXT:    s_add_i32 s9, s9, s10
+; GFX1030W32-NEXT:    s_mul_i32 s10, s4, s7
+; GFX1030W32-NEXT:    s_add_i32 s9, s9, s11
+; GFX1030W32-NEXT:    s_sub_i32 s11, s3, s9
+; GFX1030W32-NEXT:    s_sub_i32 s10, s2, s10
+; GFX1030W32-NEXT:    s_cselect_b32 s12, 1, 0
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX1030W32-NEXT:    s_subb_u32 s11, s11, s5
+; GFX1030W32-NEXT:    s_sub_i32 s13, s10, s4
+; GFX1030W32-NEXT:    s_cselect_b32 s14, 1, 0
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX1030W32-NEXT:    s_subb_u32 s11, s11, 0
+; GFX1030W32-NEXT:    s_cmp_ge_u32 s11, s5
+; GFX1030W32-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX1030W32-NEXT:    s_cmp_ge_u32 s13, s4
+; GFX1030W32-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX1030W32-NEXT:    s_cmp_eq_u32 s11, s5
+; GFX1030W32-NEXT:    s_cselect_b32 s11, s13, s14
+; GFX1030W32-NEXT:    s_add_u32 s13, s7, 1
+; GFX1030W32-NEXT:    s_addc_u32 s14, s8, 0
+; GFX1030W32-NEXT:    s_add_u32 s15, s7, 2
+; GFX1030W32-NEXT:    s_addc_u32 s16, s8, 0
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX1030W32-NEXT:    s_cselect_b32 s11, s15, s13
+; GFX1030W32-NEXT:    s_cselect_b32 s13, s16, s14
 ; GFX1030W32-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX1030W32-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v1
-; GFX1030W32-NEXT:    s_subb_u32 s6, s6, 0
-; GFX1030W32-NEXT:    s_cmp_ge_u32 s6, s3
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX1030W32-NEXT:    s_cselect_b32 s12, -1, 0
-; GFX1030W32-NEXT:    s_cmp_eq_u32 s6, s3
-; GFX1030W32-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX1030W32-NEXT:    s_add_u32 s6, s1, 1
-; GFX1030W32-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1030W32-NEXT:    s_addc_u32 s12, s5, 0
-; GFX1030W32-NEXT:    s_add_u32 s13, s1, 2
-; GFX1030W32-NEXT:    s_addc_u32 s14, s5, 0
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX1030W32-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v0
-; GFX1030W32-NEXT:    s_subb_u32 s0, s11, s0
-; GFX1030W32-NEXT:    v_mov_b32_e32 v2, s13
-; GFX1030W32-NEXT:    s_cmp_ge_u32 s0, s3
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1030W32-NEXT:    s_cselect_b32 s7, -1, 0
-; GFX1030W32-NEXT:    s_cmp_eq_u32 s0, s3
-; GFX1030W32-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX1030W32-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s14
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX1030W32-NEXT:    v_cndmask_b32_e32 v2, s6, v2, vcc_lo
-; GFX1030W32-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1030W32-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1030W32-NEXT:    v_cndmask_b32_e32 v1, s5, v1, vcc_lo
-; GFX1030W32-NEXT:    v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX1030W32-NEXT:    s_andn2_b32 vcc_lo, exec_lo, s4
+; GFX1030W32-NEXT:    s_subb_u32 s3, s3, s9
+; GFX1030W32-NEXT:    s_cmp_ge_u32 s3, s5
+; GFX1030W32-NEXT:    s_cselect_b32 s9, -1, 0
+; GFX1030W32-NEXT:    s_cmp_ge_u32 s10, s4
+; GFX1030W32-NEXT:    s_cselect_b32 s10, -1, 0
+; GFX1030W32-NEXT:    s_cmp_eq_u32 s3, s5
+; GFX1030W32-NEXT:    s_cselect_b32 s3, s10, s9
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX1030W32-NEXT:    s_cselect_b32 s9, s13, s8
+; GFX1030W32-NEXT:    s_cselect_b32 s8, s11, s7
+; GFX1030W32-NEXT:    s_andn2_b32 vcc_lo, exec_lo, s6
 ; GFX1030W32-NEXT:    s_cbranch_vccnz .LBB16_3
 ; GFX1030W32-NEXT:  .LBB16_2:
-; GFX1030W32-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX1030W32-NEXT:    s_sub_i32 s1, 0, s2
+; GFX1030W32-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX1030W32-NEXT:    s_sub_i32 s5, 0, s4
+; GFX1030W32-NEXT:    s_mov_b32 s9, 0
 ; GFX1030W32-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX1030W32-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX1030W32-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1030W32-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1030W32-NEXT:    s_mul_i32 s1, s1, s0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1030W32-NEXT:    s_add_i32 s0, s0, s1
-; GFX1030W32-NEXT:    s_mul_hi_u32 s0, s10, s0
-; GFX1030W32-NEXT:    s_mul_i32 s1, s0, s2
-; GFX1030W32-NEXT:    s_add_i32 s3, s0, 1
-; GFX1030W32-NEXT:    s_sub_i32 s1, s10, s1
-; GFX1030W32-NEXT:    s_sub_i32 s4, s1, s2
-; GFX1030W32-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1030W32-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1030W32-NEXT:    s_cselect_b32 s1, s4, s1
-; GFX1030W32-NEXT:    s_add_i32 s3, s0, 1
-; GFX1030W32-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1030W32-NEXT:    s_mov_b32 s1, 0
-; GFX1030W32-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s0
-; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1030W32-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1030W32-NEXT:    s_mul_i32 s5, s5, s3
+; GFX1030W32-NEXT:    s_mul_hi_u32 s5, s3, s5
+; GFX1030W32-NEXT:    s_add_i32 s3, s3, s5
+; GFX1030W32-NEXT:    s_mul_hi_u32 s3, s2, s3
+; GFX1030W32-NEXT:    s_mul_i32 s5, s3, s4
+; GFX1030W32-NEXT:    s_sub_i32 s2, s2, s5
+; GFX1030W32-NEXT:    s_add_i32 s5, s3, 1
+; GFX1030W32-NEXT:    s_sub_i32 s6, s2, s4
+; GFX1030W32-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX1030W32-NEXT:    s_cselect_b32 s3, s5, s3
+; GFX1030W32-NEXT:    s_cselect_b32 s2, s6, s2
+; GFX1030W32-NEXT:    s_add_i32 s5, s3, 1
+; GFX1030W32-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX1030W32-NEXT:    s_cselect_b32 s8, s5, s3
 ; GFX1030W32-NEXT:  .LBB16_3:
+; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    s_endpgm
 ; GFX1030W32-NEXT:  .LBB16_4:
-; GFX1030W32-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX1030W32-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; GFX1030W32-NEXT:    s_branch .LBB16_2
 ;
 ; GFX1030W64-LABEL: sudiv64:
 ; GFX1030W64:       ; %bb.0:
 ; GFX1030W64-NEXT:    s_clause 0x1
-; GFX1030W64-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GFX1030W64-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x34
+; GFX1030W64-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX1030W64-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT:    s_or_b64 s[0:1], s[10:11], s[2:3]
-; GFX1030W64-NEXT:    s_mov_b32 s0, 0
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1030W64-NEXT:    s_or_b64 s[6:7], s[2:3], s[4:5]
+; GFX1030W64-NEXT:    s_mov_b32 s6, 0
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; GFX1030W64-NEXT:    s_cbranch_scc0 .LBB16_4
 ; GFX1030W64-NEXT:  ; %bb.1:
-; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX1030W64-NEXT:    s_sub_u32 s5, 0, s2
-; GFX1030W64-NEXT:    s_subb_u32 s6, 0, s3
+; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GFX1030W64-NEXT:    s_sub_u32 s9, 0, s4
+; GFX1030W64-NEXT:    s_subb_u32 s10, 0, s5
 ; GFX1030W64-NEXT:    v_fmamk_f32 v0, v1, 0x4f800000, v0
 ; GFX1030W64-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX1030W64-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2789,160 +2774,158 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1030W64-NEXT:    v_fmamk_f32 v0, v1, 0xcf800000, v0
 ; GFX1030W64-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX1030W64-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1030W64-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX1030W64-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1030W64-NEXT:    s_mul_i32 s1, s5, s4
-; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s5, s0
-; GFX1030W64-NEXT:    s_mul_i32 s7, s6, s0
-; GFX1030W64-NEXT:    s_add_i32 s1, s12, s1
-; GFX1030W64-NEXT:    s_mul_i32 s13, s5, s0
-; GFX1030W64-NEXT:    s_add_i32 s1, s1, s7
-; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s0, s13
-; GFX1030W64-NEXT:    s_mul_hi_u32 s14, s4, s13
-; GFX1030W64-NEXT:    s_mul_i32 s7, s4, s13
-; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s0, s1
-; GFX1030W64-NEXT:    s_mul_i32 s0, s0, s1
-; GFX1030W64-NEXT:    s_mul_hi_u32 s15, s4, s1
-; GFX1030W64-NEXT:    s_add_u32 s0, s12, s0
-; GFX1030W64-NEXT:    s_addc_u32 s12, 0, s13
-; GFX1030W64-NEXT:    s_add_u32 s0, s0, s7
-; GFX1030W64-NEXT:    s_mul_i32 s1, s4, s1
-; GFX1030W64-NEXT:    s_addc_u32 s0, s12, s14
-; GFX1030W64-NEXT:    s_addc_u32 s7, s15, 0
-; GFX1030W64-NEXT:    s_add_u32 s0, s0, s1
-; GFX1030W64-NEXT:    s_addc_u32 s7, 0, s7
-; GFX1030W64-NEXT:    v_add_co_u32 v0, s[0:1], v0, s0
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1030W64-NEXT:    s_addc_u32 s4, s4, s7
-; GFX1030W64-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1030W64-NEXT:    s_mul_i32 s1, s5, s4
-; GFX1030W64-NEXT:    s_mul_hi_u32 s7, s5, s0
-; GFX1030W64-NEXT:    s_mul_i32 s6, s6, s0
-; GFX1030W64-NEXT:    s_add_i32 s1, s7, s1
-; GFX1030W64-NEXT:    s_mul_i32 s5, s5, s0
-; GFX1030W64-NEXT:    s_add_i32 s1, s1, s6
-; GFX1030W64-NEXT:    s_mul_hi_u32 s7, s4, s5
-; GFX1030W64-NEXT:    s_mul_i32 s12, s4, s5
-; GFX1030W64-NEXT:    s_mul_hi_u32 s5, s0, s5
-; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s0, s1
-; GFX1030W64-NEXT:    s_mul_i32 s0, s0, s1
-; GFX1030W64-NEXT:    s_mul_hi_u32 s6, s4, s1
-; GFX1030W64-NEXT:    s_add_u32 s0, s5, s0
-; GFX1030W64-NEXT:    s_addc_u32 s5, 0, s13
-; GFX1030W64-NEXT:    s_add_u32 s0, s0, s12
-; GFX1030W64-NEXT:    s_mul_i32 s1, s4, s1
-; GFX1030W64-NEXT:    s_addc_u32 s0, s5, s7
-; GFX1030W64-NEXT:    s_addc_u32 s5, s6, 0
-; GFX1030W64-NEXT:    s_add_u32 s0, s0, s1
-; GFX1030W64-NEXT:    s_addc_u32 s5, 0, s5
-; GFX1030W64-NEXT:    v_add_co_u32 v0, s[0:1], v0, s0
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1030W64-NEXT:    s_addc_u32 s0, s4, s5
-; GFX1030W64-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1030W64-NEXT:    s_mul_i32 s5, s10, s0
-; GFX1030W64-NEXT:    s_mul_hi_u32 s4, s10, s0
-; GFX1030W64-NEXT:    s_mul_hi_u32 s6, s11, s0
-; GFX1030W64-NEXT:    s_mul_i32 s0, s11, s0
-; GFX1030W64-NEXT:    s_mul_hi_u32 s7, s10, s1
-; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s11, s1
-; GFX1030W64-NEXT:    s_mul_i32 s1, s11, s1
-; GFX1030W64-NEXT:    s_add_u32 s5, s7, s5
-; GFX1030W64-NEXT:    s_addc_u32 s4, 0, s4
-; GFX1030W64-NEXT:    s_add_u32 s1, s5, s1
-; GFX1030W64-NEXT:    s_addc_u32 s1, s4, s12
-; GFX1030W64-NEXT:    s_addc_u32 s4, s6, 0
-; GFX1030W64-NEXT:    s_add_u32 s6, s1, s0
-; GFX1030W64-NEXT:    s_addc_u32 s7, 0, s4
-; GFX1030W64-NEXT:    s_mul_hi_u32 s0, s2, s6
-; GFX1030W64-NEXT:    s_mul_i32 s1, s2, s7
-; GFX1030W64-NEXT:    s_mul_i32 s5, s2, s6
-; GFX1030W64-NEXT:    s_add_i32 s12, s0, s1
-; GFX1030W64-NEXT:    v_sub_co_u32 v0, s[0:1], s10, s5
-; GFX1030W64-NEXT:    s_mul_i32 s4, s3, s6
-; GFX1030W64-NEXT:    s_add_i32 s12, s12, s4
-; GFX1030W64-NEXT:    v_sub_co_u32 v1, s[4:5], v0, s2
-; GFX1030W64-NEXT:    s_sub_i32 s13, s11, s12
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1030W64-NEXT:    s_subb_u32 s13, s13, s3
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX1030W64-NEXT:    v_cmp_le_u32_e32 vcc, s2, v1
-; GFX1030W64-NEXT:    s_subb_u32 s4, s13, 0
-; GFX1030W64-NEXT:    s_cmp_ge_u32 s4, s3
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; GFX1030W64-NEXT:    s_cselect_b32 s5, -1, 0
-; GFX1030W64-NEXT:    s_cmp_eq_u32 s4, s3
-; GFX1030W64-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX1030W64-NEXT:    s_add_u32 s4, s6, 1
-; GFX1030W64-NEXT:    v_cndmask_b32_e32 v1, s5, v1, vcc
-; GFX1030W64-NEXT:    s_addc_u32 s5, s7, 0
-; GFX1030W64-NEXT:    s_add_u32 s13, s6, 2
-; GFX1030W64-NEXT:    s_addc_u32 s14, s7, 0
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1030W64-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
-; GFX1030W64-NEXT:    s_subb_u32 s0, s11, s12
-; GFX1030W64-NEXT:    v_mov_b32_e32 v2, s13
-; GFX1030W64-NEXT:    s_cmp_ge_u32 s0, s3
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; GFX1030W64-NEXT:    s_cselect_b32 s11, -1, 0
-; GFX1030W64-NEXT:    s_cmp_eq_u32 s0, s3
-; GFX1030W64-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX1030W64-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s14
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v0, s11, v0, s[0:1]
-; GFX1030W64-NEXT:    v_cndmask_b32_e32 v2, s4, v2, vcc
-; GFX1030W64-NEXT:    v_cndmask_b32_e32 v1, s5, v1, vcc
-; GFX1030W64-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX1030W64-NEXT:    v_cndmask_b32_e32 v1, s7, v1, vcc
-; GFX1030W64-NEXT:    v_cndmask_b32_e32 v0, s6, v2, vcc
+; GFX1030W64-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX1030W64-NEXT:    v_readfirstlane_b32 s6, v0
+; GFX1030W64-NEXT:    s_mul_i32 s7, s9, s8
+; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s9, s6
+; GFX1030W64-NEXT:    s_mul_i32 s11, s10, s6
+; GFX1030W64-NEXT:    s_add_i32 s7, s12, s7
+; GFX1030W64-NEXT:    s_mul_i32 s13, s9, s6
+; GFX1030W64-NEXT:    s_add_i32 s7, s7, s11
+; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s6, s13
+; GFX1030W64-NEXT:    s_mul_i32 s15, s6, s7
+; GFX1030W64-NEXT:    s_mul_hi_u32 s14, s8, s13
+; GFX1030W64-NEXT:    s_mul_i32 s11, s8, s13
+; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s6, s7
+; GFX1030W64-NEXT:    s_add_u32 s12, s12, s15
+; GFX1030W64-NEXT:    s_addc_u32 s13, 0, s13
+; GFX1030W64-NEXT:    s_mul_hi_u32 s16, s8, s7
+; GFX1030W64-NEXT:    s_add_u32 s11, s12, s11
+; GFX1030W64-NEXT:    s_mul_i32 s7, s8, s7
+; GFX1030W64-NEXT:    s_addc_u32 s11, s13, s14
+; GFX1030W64-NEXT:    s_addc_u32 s12, s16, 0
+; GFX1030W64-NEXT:    s_add_u32 s7, s11, s7
+; GFX1030W64-NEXT:    s_addc_u32 s11, 0, s12
+; GFX1030W64-NEXT:    s_add_i32 s12, s6, s7
+; GFX1030W64-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s9, s12
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1030W64-NEXT:    s_mul_i32 s6, s9, s12
+; GFX1030W64-NEXT:    s_addc_u32 s8, s8, s11
+; GFX1030W64-NEXT:    s_mul_i32 s10, s10, s12
+; GFX1030W64-NEXT:    s_mul_i32 s9, s9, s8
+; GFX1030W64-NEXT:    s_mul_hi_u32 s7, s12, s6
+; GFX1030W64-NEXT:    s_add_i32 s9, s13, s9
+; GFX1030W64-NEXT:    s_mul_hi_u32 s11, s8, s6
+; GFX1030W64-NEXT:    s_add_i32 s9, s9, s10
+; GFX1030W64-NEXT:    s_mul_i32 s6, s8, s6
+; GFX1030W64-NEXT:    s_mul_i32 s14, s12, s9
+; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s12, s9
+; GFX1030W64-NEXT:    s_add_u32 s7, s7, s14
+; GFX1030W64-NEXT:    s_addc_u32 s13, 0, s13
+; GFX1030W64-NEXT:    s_mul_hi_u32 s10, s8, s9
+; GFX1030W64-NEXT:    s_add_u32 s6, s7, s6
+; GFX1030W64-NEXT:    s_mul_i32 s9, s8, s9
+; GFX1030W64-NEXT:    s_addc_u32 s6, s13, s11
+; GFX1030W64-NEXT:    s_addc_u32 s7, s10, 0
+; GFX1030W64-NEXT:    s_add_u32 s6, s6, s9
+; GFX1030W64-NEXT:    s_addc_u32 s9, 0, s7
+; GFX1030W64-NEXT:    s_add_i32 s12, s12, s6
+; GFX1030W64-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GFX1030W64-NEXT:    s_mul_hi_u32 s10, s2, s12
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1030W64-NEXT:    s_mul_hi_u32 s6, s3, s12
+; GFX1030W64-NEXT:    s_addc_u32 s7, s8, s9
+; GFX1030W64-NEXT:    s_mul_i32 s8, s3, s12
+; GFX1030W64-NEXT:    s_mul_i32 s11, s2, s7
+; GFX1030W64-NEXT:    s_mul_hi_u32 s9, s2, s7
+; GFX1030W64-NEXT:    s_add_u32 s10, s10, s11
+; GFX1030W64-NEXT:    s_addc_u32 s9, 0, s9
+; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s3, s7
+; GFX1030W64-NEXT:    s_add_u32 s8, s10, s8
+; GFX1030W64-NEXT:    s_mul_i32 s7, s3, s7
+; GFX1030W64-NEXT:    s_addc_u32 s6, s9, s6
+; GFX1030W64-NEXT:    s_addc_u32 s8, s12, 0
+; GFX1030W64-NEXT:    s_add_u32 s10, s6, s7
+; GFX1030W64-NEXT:    s_addc_u32 s11, 0, s8
+; GFX1030W64-NEXT:    s_mul_hi_u32 s6, s4, s10
+; GFX1030W64-NEXT:    s_mul_i32 s7, s4, s11
+; GFX1030W64-NEXT:    s_mul_i32 s8, s5, s10
+; GFX1030W64-NEXT:    s_add_i32 s6, s6, s7
+; GFX1030W64-NEXT:    s_add_i32 s12, s6, s8
+; GFX1030W64-NEXT:    s_mul_i32 s6, s4, s10
+; GFX1030W64-NEXT:    s_sub_i32 s8, s3, s12
+; GFX1030W64-NEXT:    s_sub_i32 s13, s2, s6
+; GFX1030W64-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1030W64-NEXT:    s_subb_u32 s14, s8, s5
+; GFX1030W64-NEXT:    s_sub_i32 s15, s13, s4
+; GFX1030W64-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX1030W64-NEXT:    s_subb_u32 s8, s14, 0
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s8, s5
+; GFX1030W64-NEXT:    s_cselect_b32 s9, -1, 0
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s15, s4
+; GFX1030W64-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX1030W64-NEXT:    s_cmp_eq_u32 s8, s5
+; GFX1030W64-NEXT:    s_cselect_b32 s8, s14, s9
+; GFX1030W64-NEXT:    s_add_u32 s9, s10, 1
+; GFX1030W64-NEXT:    s_addc_u32 s14, s11, 0
+; GFX1030W64-NEXT:    s_add_u32 s15, s10, 2
+; GFX1030W64-NEXT:    s_addc_u32 s16, s11, 0
+; GFX1030W64-NEXT:    s_cmp_lg_u32 s8, 0
+; GFX1030W64-NEXT:    s_cselect_b32 s15, s15, s9
+; GFX1030W64-NEXT:    s_cselect_b32 s14, s16, s14
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GFX1030W64-NEXT:    s_subb_u32 s3, s3, s12
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s3, s5
+; GFX1030W64-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s13, s4
+; GFX1030W64-NEXT:    s_cselect_b32 s7, -1, 0
+; GFX1030W64-NEXT:    s_cmp_eq_u32 s3, s5
+; GFX1030W64-NEXT:    s_cselect_b32 s3, s7, s6
+; GFX1030W64-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX1030W64-NEXT:    s_cselect_b32 s7, s14, s11
+; GFX1030W64-NEXT:    s_cselect_b32 s6, s15, s10
 ; GFX1030W64-NEXT:    s_cbranch_execnz .LBB16_3
 ; GFX1030W64-NEXT:  .LBB16_2:
-; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX1030W64-NEXT:    s_sub_i32 s1, 0, s2
+; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX1030W64-NEXT:    s_sub_i32 s5, 0, s4
+; GFX1030W64-NEXT:    s_mov_b32 s7, 0
 ; GFX1030W64-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX1030W64-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX1030W64-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1030W64-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1030W64-NEXT:    s_mul_i32 s1, s1, s0
-; GFX1030W64-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1030W64-NEXT:    s_add_i32 s0, s0, s1
-; GFX1030W64-NEXT:    s_mul_hi_u32 s0, s10, s0
-; GFX1030W64-NEXT:    s_mul_i32 s1, s0, s2
-; GFX1030W64-NEXT:    s_add_i32 s3, s0, 1
-; GFX1030W64-NEXT:    s_sub_i32 s1, s10, s1
-; GFX1030W64-NEXT:    s_sub_i32 s4, s1, s2
-; GFX1030W64-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1030W64-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1030W64-NEXT:    s_cselect_b32 s1, s4, s1
-; GFX1030W64-NEXT:    s_add_i32 s3, s0, 1
-; GFX1030W64-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1030W64-NEXT:    s_mov_b32 s1, 0
-; GFX1030W64-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s0
-; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1030W64-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1030W64-NEXT:    s_mul_i32 s5, s5, s3
+; GFX1030W64-NEXT:    s_mul_hi_u32 s5, s3, s5
+; GFX1030W64-NEXT:    s_add_i32 s3, s3, s5
+; GFX1030W64-NEXT:    s_mul_hi_u32 s3, s2, s3
+; GFX1030W64-NEXT:    s_mul_i32 s5, s3, s4
+; GFX1030W64-NEXT:    s_sub_i32 s2, s2, s5
+; GFX1030W64-NEXT:    s_add_i32 s5, s3, 1
+; GFX1030W64-NEXT:    s_sub_i32 s6, s2, s4
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX1030W64-NEXT:    s_cselect_b32 s3, s5, s3
+; GFX1030W64-NEXT:    s_cselect_b32 s2, s6, s2
+; GFX1030W64-NEXT:    s_add_i32 s5, s3, 1
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX1030W64-NEXT:    s_cselect_b32 s6, s5, s3
 ; GFX1030W64-NEXT:  .LBB16_3:
+; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s6
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    s_endpgm
 ; GFX1030W64-NEXT:  .LBB16_4:
-; GFX1030W64-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX1030W64-NEXT:    ; implicit-def: $sgpr6_sgpr7
 ; GFX1030W64-NEXT:    s_branch .LBB16_2
 ;
 ; GFX11-LABEL: sudiv64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    s_load_b128 s[8:11], s[4:5], 0x24
-; GFX11-NEXT:    s_load_b64 s[2:3], s[4:5], 0x34
+; GFX11-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_or_b64 s[4:5], s[10:11], s[2:3]
-; GFX11-NEXT:    s_mov_b32 s4, 0
+; GFX11-NEXT:    s_or_b64 s[6:7], s[2:3], s[4:5]
+; GFX11-NEXT:    s_mov_b32 s6, 0
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX11-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; GFX11-NEXT:    s_cbranch_scc0 .LBB16_4
 ; GFX11-NEXT:  ; %bb.1:
-; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX11-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX11-NEXT:    s_sub_u32 s5, 0, s2
-; GFX11-NEXT:    s_subb_u32 s6, 0, s3
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GFX11-NEXT:    s_sub_u32 s9, 0, s4
+; GFX11-NEXT:    s_subb_u32 s10, 0, s5
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_fmamk_f32 v0, v1, 0x4f800000, v0
 ; GFX11-NEXT:    v_rcp_f32_e32 v0, v0
@@ -2956,310 +2939,308 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX11-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX11-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX11-NEXT:    s_mul_i32 s7, s5, s0
-; GFX11-NEXT:    s_mul_hi_u32 s13, s5, s1
-; GFX11-NEXT:    s_mul_i32 s12, s6, s1
-; GFX11-NEXT:    s_add_i32 s7, s13, s7
-; GFX11-NEXT:    s_mul_i32 s14, s5, s1
-; GFX11-NEXT:    s_add_i32 s7, s7, s12
-; GFX11-NEXT:    s_mul_hi_u32 s13, s1, s14
-; GFX11-NEXT:    s_mul_hi_u32 s15, s0, s14
-; GFX11-NEXT:    s_mul_i32 s12, s0, s14
-; GFX11-NEXT:    s_mul_hi_u32 s14, s1, s7
-; GFX11-NEXT:    s_mul_i32 s1, s1, s7
-; GFX11-NEXT:    s_mul_hi_u32 s16, s0, s7
-; GFX11-NEXT:    s_add_u32 s1, s13, s1
-; GFX11-NEXT:    s_addc_u32 s13, 0, s14
-; GFX11-NEXT:    s_add_u32 s1, s1, s12
-; GFX11-NEXT:    s_mul_i32 s7, s0, s7
-; GFX11-NEXT:    s_addc_u32 s1, s13, s15
-; GFX11-NEXT:    s_addc_u32 s12, s16, 0
-; GFX11-NEXT:    s_add_u32 s1, s1, s7
-; GFX11-NEXT:    s_addc_u32 s7, 0, s12
-; GFX11-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX11-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX11-NEXT:    s_addc_u32 s0, s0, s7
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX11-NEXT:    s_mul_i32 s7, s5, s0
-; GFX11-NEXT:    s_mul_hi_u32 s12, s5, s1
-; GFX11-NEXT:    s_mul_i32 s6, s6, s1
-; GFX11-NEXT:    s_add_i32 s7, s12, s7
-; GFX11-NEXT:    s_mul_i32 s5, s5, s1
-; GFX11-NEXT:    s_add_i32 s7, s7, s6
-; GFX11-NEXT:    s_mul_hi_u32 s12, s0, s5
-; GFX11-NEXT:    s_mul_i32 s13, s0, s5
-; GFX11-NEXT:    s_mul_hi_u32 s5, s1, s5
-; GFX11-NEXT:    s_mul_hi_u32 s14, s1, s7
-; GFX11-NEXT:    s_mul_i32 s1, s1, s7
-; GFX11-NEXT:    s_mul_hi_u32 s6, s0, s7
-; GFX11-NEXT:    s_add_u32 s1, s5, s1
-; GFX11-NEXT:    s_addc_u32 s5, 0, s14
-; GFX11-NEXT:    s_add_u32 s1, s1, s13
-; GFX11-NEXT:    s_mul_i32 s7, s0, s7
-; GFX11-NEXT:    s_addc_u32 s1, s5, s12
-; GFX11-NEXT:    s_addc_u32 s5, s6, 0
-; GFX11-NEXT:    s_add_u32 s1, s1, s7
-; GFX11-NEXT:    s_addc_u32 s5, 0, s5
-; GFX11-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX11-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX11-NEXT:    s_addc_u32 s0, s0, s5
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX11-NEXT:    s_mul_i32 s6, s10, s0
-; GFX11-NEXT:    s_mul_hi_u32 s5, s10, s0
-; GFX11-NEXT:    s_mul_hi_u32 s7, s11, s0
-; GFX11-NEXT:    s_mul_i32 s0, s11, s0
-; GFX11-NEXT:    s_mul_hi_u32 s12, s10, s1
-; GFX11-NEXT:    s_mul_hi_u32 s13, s11, s1
-; GFX11-NEXT:    s_mul_i32 s1, s11, s1
-; GFX11-NEXT:    s_add_u32 s6, s12, s6
-; GFX11-NEXT:    s_addc_u32 s5, 0, s5
-; GFX11-NEXT:    s_add_u32 s1, s6, s1
-; GFX11-NEXT:    s_addc_u32 s1, s5, s13
-; GFX11-NEXT:    s_addc_u32 s5, s7, 0
-; GFX11-NEXT:    s_add_u32 s1, s1, s0
-; GFX11-NEXT:    s_addc_u32 s5, 0, s5
-; GFX11-NEXT:    s_mul_hi_u32 s0, s2, s1
-; GFX11-NEXT:    s_mul_i32 s7, s2, s5
-; GFX11-NEXT:    s_mul_i32 s12, s2, s1
-; GFX11-NEXT:    s_add_i32 s0, s0, s7
-; GFX11-NEXT:    v_sub_co_u32 v0, s7, s10, s12
-; GFX11-NEXT:    s_mul_i32 s6, s3, s1
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_add_i32 s0, s0, s6
-; GFX11-NEXT:    v_sub_co_u32 v1, s12, v0, s2
-; GFX11-NEXT:    s_sub_i32 s6, s11, s0
-; GFX11-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX11-NEXT:    s_subb_u32 s6, s6, s3
+; GFX11-NEXT:    v_readfirstlane_b32 s7, v1
+; GFX11-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX11-NEXT:    s_mul_i32 s11, s9, s7
+; GFX11-NEXT:    s_mul_hi_u32 s13, s9, s8
+; GFX11-NEXT:    s_mul_i32 s12, s10, s8
+; GFX11-NEXT:    s_add_i32 s11, s13, s11
+; GFX11-NEXT:    s_mul_i32 s14, s9, s8
+; GFX11-NEXT:    s_add_i32 s11, s11, s12
+; GFX11-NEXT:    s_mul_hi_u32 s13, s8, s14
+; GFX11-NEXT:    s_mul_i32 s16, s8, s11
+; GFX11-NEXT:    s_mul_hi_u32 s15, s7, s14
+; GFX11-NEXT:    s_mul_i32 s12, s7, s14
+; GFX11-NEXT:    s_mul_hi_u32 s14, s8, s11
+; GFX11-NEXT:    s_add_u32 s13, s13, s16
+; GFX11-NEXT:    s_addc_u32 s14, 0, s14
+; GFX11-NEXT:    s_mul_hi_u32 s17, s7, s11
+; GFX11-NEXT:    s_add_u32 s12, s13, s12
+; GFX11-NEXT:    s_mul_i32 s11, s7, s11
+; GFX11-NEXT:    s_addc_u32 s12, s14, s15
+; GFX11-NEXT:    s_addc_u32 s13, s17, 0
+; GFX11-NEXT:    s_add_u32 s11, s12, s11
+; GFX11-NEXT:    s_addc_u32 s12, 0, s13
+; GFX11-NEXT:    s_add_i32 s8, s8, s11
+; GFX11-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX11-NEXT:    s_mul_hi_u32 s13, s9, s8
+; GFX11-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX11-NEXT:    s_mul_i32 s11, s9, s8
+; GFX11-NEXT:    s_addc_u32 s7, s7, s12
+; GFX11-NEXT:    s_mul_i32 s10, s10, s8
+; GFX11-NEXT:    s_mul_i32 s9, s9, s7
+; GFX11-NEXT:    s_mul_hi_u32 s12, s8, s11
+; GFX11-NEXT:    s_add_i32 s9, s13, s9
+; GFX11-NEXT:    s_mul_hi_u32 s13, s7, s11
+; GFX11-NEXT:    s_add_i32 s9, s9, s10
+; GFX11-NEXT:    s_mul_i32 s10, s7, s11
+; GFX11-NEXT:    s_mul_i32 s15, s8, s9
+; GFX11-NEXT:    s_mul_hi_u32 s14, s8, s9
+; GFX11-NEXT:    s_add_u32 s12, s12, s15
+; GFX11-NEXT:    s_addc_u32 s14, 0, s14
+; GFX11-NEXT:    s_mul_hi_u32 s11, s7, s9
+; GFX11-NEXT:    s_add_u32 s10, s12, s10
+; GFX11-NEXT:    s_mul_i32 s9, s7, s9
+; GFX11-NEXT:    s_addc_u32 s10, s14, s13
+; GFX11-NEXT:    s_addc_u32 s11, s11, 0
+; GFX11-NEXT:    s_add_u32 s9, s10, s9
+; GFX11-NEXT:    s_addc_u32 s10, 0, s11
+; GFX11-NEXT:    s_add_i32 s8, s8, s9
+; GFX11-NEXT:    s_cselect_b32 s9, 1, 0
+; GFX11-NEXT:    s_mul_hi_u32 s11, s2, s8
+; GFX11-NEXT:    s_cmp_lg_u32 s9, 0
+; GFX11-NEXT:    s_mul_hi_u32 s9, s3, s8
+; GFX11-NEXT:    s_addc_u32 s7, s7, s10
+; GFX11-NEXT:    s_mul_i32 s8, s3, s8
+; GFX11-NEXT:    s_mul_i32 s12, s2, s7
+; GFX11-NEXT:    s_mul_hi_u32 s10, s2, s7
+; GFX11-NEXT:    s_add_u32 s11, s11, s12
+; GFX11-NEXT:    s_addc_u32 s10, 0, s10
+; GFX11-NEXT:    s_mul_hi_u32 s13, s3, s7
+; GFX11-NEXT:    s_add_u32 s8, s11, s8
+; GFX11-NEXT:    s_mul_i32 s7, s3, s7
+; GFX11-NEXT:    s_addc_u32 s8, s10, s9
+; GFX11-NEXT:    s_addc_u32 s9, s13, 0
+; GFX11-NEXT:    s_add_u32 s7, s8, s7
+; GFX11-NEXT:    s_addc_u32 s8, 0, s9
+; GFX11-NEXT:    s_mul_hi_u32 s9, s4, s7
+; GFX11-NEXT:    s_mul_i32 s10, s4, s8
+; GFX11-NEXT:    s_mul_i32 s11, s5, s7
+; GFX11-NEXT:    s_add_i32 s9, s9, s10
+; GFX11-NEXT:    s_mul_i32 s10, s4, s7
+; GFX11-NEXT:    s_add_i32 s9, s9, s11
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_sub_i32 s11, s3, s9
+; GFX11-NEXT:    s_sub_i32 s10, s2, s10
+; GFX11-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX11-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX11-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v1
-; GFX11-NEXT:    s_subb_u32 s6, s6, 0
+; GFX11-NEXT:    s_subb_u32 s11, s11, s5
+; GFX11-NEXT:    s_sub_i32 s13, s10, s4
+; GFX11-NEXT:    s_cselect_b32 s14, 1, 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX11-NEXT:    s_subb_u32 s11, s11, 0
+; GFX11-NEXT:    s_cmp_ge_u32 s11, s5
+; GFX11-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX11-NEXT:    s_cmp_ge_u32 s13, s4
+; GFX11-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX11-NEXT:    s_cmp_eq_u32 s11, s5
+; GFX11-NEXT:    s_cselect_b32 s11, s13, s14
+; GFX11-NEXT:    s_add_u32 s13, s7, 1
+; GFX11-NEXT:    s_addc_u32 s14, s8, 0
+; GFX11-NEXT:    s_add_u32 s15, s7, 2
+; GFX11-NEXT:    s_addc_u32 s16, s8, 0
+; GFX11-NEXT:    s_cmp_lg_u32 s11, 0
+; GFX11-NEXT:    s_cselect_b32 s11, s15, s13
+; GFX11-NEXT:    s_cselect_b32 s13, s16, s14
+; GFX11-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX11-NEXT:    s_subb_u32 s3, s3, s9
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    s_cmp_ge_u32 s3, s5
+; GFX11-NEXT:    s_cselect_b32 s9, -1, 0
+; GFX11-NEXT:    s_cmp_ge_u32 s10, s4
+; GFX11-NEXT:    s_cselect_b32 s10, -1, 0
+; GFX11-NEXT:    s_cmp_eq_u32 s3, s5
+; GFX11-NEXT:    s_cselect_b32 s3, s10, s9
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_cmp_ge_u32 s6, s3
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX11-NEXT:    s_cselect_b32 s12, -1, 0
-; GFX11-NEXT:    s_cmp_eq_u32 s6, s3
-; GFX11-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX11-NEXT:    s_add_u32 s6, s1, 1
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX11-NEXT:    s_addc_u32 s12, s5, 0
-; GFX11-NEXT:    s_add_u32 s13, s1, 2
-; GFX11-NEXT:    s_addc_u32 s14, s5, 0
-; GFX11-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX11-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v0
-; GFX11-NEXT:    s_subb_u32 s0, s11, s0
-; GFX11-NEXT:    v_mov_b32_e32 v2, s13
-; GFX11-NEXT:    s_cmp_ge_u32 s0, s3
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX11-NEXT:    s_cselect_b32 s7, -1, 0
-; GFX11-NEXT:    s_cmp_eq_u32 s0, s3
-; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX11-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX11-NEXT:    v_mov_b32_e32 v1, s14
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s6, v2, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX11-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s5, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX11-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX11-NEXT:    s_cselect_b32 s9, s13, s8
+; GFX11-NEXT:    s_cselect_b32 s8, s11, s7
+; GFX11-NEXT:    s_and_not1_b32 vcc_lo, exec_lo, s6
 ; GFX11-NEXT:    s_cbranch_vccnz .LBB16_3
 ; GFX11-NEXT:  .LBB16_2:
-; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX11-NEXT:    s_sub_i32 s1, 0, s2
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX11-NEXT:    s_sub_i32 s5, 0, s4
+; GFX11-NEXT:    s_mov_b32 s9, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
 ; GFX11-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX11-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX11-NEXT:    s_mul_i32 s1, s1, s0
-; GFX11-NEXT:    s_mul_hi_u32 s1, s0, s1
+; GFX11-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX11-NEXT:    s_mul_i32 s5, s5, s3
+; GFX11-NEXT:    s_mul_hi_u32 s5, s3, s5
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_add_i32 s0, s0, s1
-; GFX11-NEXT:    s_mul_hi_u32 s0, s10, s0
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_mul_i32 s1, s0, s2
-; GFX11-NEXT:    s_add_i32 s3, s0, 1
-; GFX11-NEXT:    s_sub_i32 s1, s10, s1
-; GFX11-NEXT:    s_sub_i32 s4, s1, s2
-; GFX11-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX11-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX11-NEXT:    s_cselect_b32 s1, s4, s1
-; GFX11-NEXT:    s_add_i32 s3, s0, 1
-; GFX11-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX11-NEXT:    s_mov_b32 s1, 0
-; GFX11-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT:    s_add_i32 s3, s3, s5
+; GFX11-NEXT:    s_mul_hi_u32 s3, s2, s3
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_mul_i32 s5, s3, s4
+; GFX11-NEXT:    s_sub_i32 s2, s2, s5
+; GFX11-NEXT:    s_add_i32 s5, s3, 1
+; GFX11-NEXT:    s_sub_i32 s6, s2, s4
+; GFX11-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX11-NEXT:    s_cselect_b32 s3, s5, s3
+; GFX11-NEXT:    s_cselect_b32 s2, s6, s2
+; GFX11-NEXT:    s_add_i32 s5, s3, 1
+; GFX11-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX11-NEXT:    s_cselect_b32 s8, s5, s3
 ; GFX11-NEXT:  .LBB16_3:
-; GFX11-NEXT:    v_mov_b32_e32 v2, 0
-; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[8:9]
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    v_mov_b32_e32 v0, s8
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s9
+; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    s_endpgm
 ; GFX11-NEXT:  .LBB16_4:
-; GFX11-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX11-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; GFX11-NEXT:    s_branch .LBB16_2
 ;
 ; GFX1250-LABEL: sudiv64:
 ; GFX1250:       ; %bb.0:
-; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    s_load_b128 s[8:11], s[4:5], 0x24
-; GFX1250-NEXT:    s_load_b64 s[2:3], s[4:5], 0x34
+; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT:    s_wait_xcnt 0x0
+; GFX1250-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_or_b64 s[0:1], s[10:11], s[2:3]
+; GFX1250-NEXT:    s_or_b64 s[6:7], s[2:3], s[4:5]
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_and_b64 s[0:1], s[0:1], lit64(0xffffffff00000000)
-; GFX1250-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1250-NEXT:    s_and_b64 s[6:7], s[6:7], lit64(0xffffffff00000000)
+; GFX1250-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; GFX1250-NEXT:    s_cbranch_scc0 .LBB16_4
 ; GFX1250-NEXT:  ; %bb.1:
-; GFX1250-NEXT:    s_cvt_f32_u32 s0, s2
-; GFX1250-NEXT:    s_cvt_f32_u32 s1, s3
-; GFX1250-NEXT:    s_sub_nc_u64 s[6:7], 0, s[2:3]
+; GFX1250-NEXT:    s_cvt_f32_u32 s6, s4
+; GFX1250-NEXT:    s_cvt_f32_u32 s7, s5
+; GFX1250-NEXT:    s_sub_nc_u64 s[10:11], 0, s[4:5]
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_2) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT:    s_fmac_f32 s0, s1, 0x4f800000
-; GFX1250-NEXT:    v_s_rcp_f32 s0, s0
+; GFX1250-NEXT:    s_fmac_f32 s6, s7, 0x4f800000
+; GFX1250-NEXT:    v_s_rcp_f32 s6, s6
 ; GFX1250-NEXT:    s_delay_alu instid0(TRANS32_DEP_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT:    s_mul_f32 s0, s0, 0x5f7ffffc
-; GFX1250-NEXT:    s_mul_f32 s1, s0, 0x2f800000
+; GFX1250-NEXT:    s_mul_f32 s6, s6, 0x5f7ffffc
+; GFX1250-NEXT:    s_mul_f32 s7, s6, 0x2f800000
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_3) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT:    s_trunc_f32 s1, s1
-; GFX1250-NEXT:    s_fmac_f32 s0, s1, 0xcf800000
-; GFX1250-NEXT:    s_cvt_u32_f32 s5, s1
-; GFX1250-NEXT:    s_mov_b32 s1, 0
+; GFX1250-NEXT:    s_trunc_f32 s7, s7
+; GFX1250-NEXT:    s_fmac_f32 s6, s7, 0xcf800000
+; GFX1250-NEXT:    s_cvt_u32_f32 s9, s7
+; GFX1250-NEXT:    s_mov_b32 s7, 0
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_3)
-; GFX1250-NEXT:    s_cvt_u32_f32 s4, s0
-; GFX1250-NEXT:    s_mul_u64 s[12:13], s[6:7], s[4:5]
+; GFX1250-NEXT:    s_cvt_u32_f32 s8, s6
+; GFX1250-NEXT:    s_mul_u64 s[12:13], s[10:11], s[8:9]
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_mul_hi_u32 s15, s4, s13
-; GFX1250-NEXT:    s_mul_i32 s14, s4, s13
-; GFX1250-NEXT:    s_mul_hi_u32 s0, s4, s12
-; GFX1250-NEXT:    s_mul_i32 s17, s5, s12
-; GFX1250-NEXT:    s_add_nc_u64 s[14:15], s[0:1], s[14:15]
-; GFX1250-NEXT:    s_mul_hi_u32 s16, s5, s12
-; GFX1250-NEXT:    s_mul_hi_u32 s18, s5, s13
-; GFX1250-NEXT:    s_add_co_u32 s0, s14, s17
-; GFX1250-NEXT:    s_add_co_ci_u32 s0, s15, s16
-; GFX1250-NEXT:    s_mul_i32 s12, s5, s13
+; GFX1250-NEXT:    s_mul_hi_u32 s15, s8, s13
+; GFX1250-NEXT:    s_mul_i32 s14, s8, s13
+; GFX1250-NEXT:    s_mul_hi_u32 s6, s8, s12
+; GFX1250-NEXT:    s_mul_i32 s17, s9, s12
+; GFX1250-NEXT:    s_add_nc_u64 s[14:15], s[6:7], s[14:15]
+; GFX1250-NEXT:    s_mul_hi_u32 s16, s9, s12
+; GFX1250-NEXT:    s_mul_hi_u32 s18, s9, s13
+; GFX1250-NEXT:    s_add_co_u32 s6, s14, s17
+; GFX1250-NEXT:    s_add_co_ci_u32 s6, s15, s16
+; GFX1250-NEXT:    s_mul_i32 s12, s9, s13
 ; GFX1250-NEXT:    s_add_co_ci_u32 s13, s18, 0
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_add_nc_u64 s[12:13], s[0:1], s[12:13]
-; GFX1250-NEXT:    v_add_co_u32 v0, s0, s4, s12
-; GFX1250-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX1250-NEXT:    s_add_co_ci_u32 s5, s5, s13
-; GFX1250-NEXT:    v_readfirstlane_b32 s4, v0
-; GFX1250-NEXT:    s_mul_u64 s[6:7], s[6:7], s[4:5]
+; GFX1250-NEXT:    s_add_nc_u64 s[12:13], s[6:7], s[12:13]
+; GFX1250-NEXT:    s_add_co_i32 s8, s8, s12
+; GFX1250-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1250-NEXT:    s_add_co_ci_u32 s9, s9, s13
+; GFX1250-NEXT:    s_mul_u64 s[10:11], s[10:11], s[8:9]
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_mul_hi_u32 s13, s4, s7
-; GFX1250-NEXT:    s_mul_i32 s12, s4, s7
-; GFX1250-NEXT:    s_mul_hi_u32 s0, s4, s6
-; GFX1250-NEXT:    s_mul_i32 s15, s5, s6
-; GFX1250-NEXT:    s_add_nc_u64 s[12:13], s[0:1], s[12:13]
-; GFX1250-NEXT:    s_mul_hi_u32 s14, s5, s6
-; GFX1250-NEXT:    s_mul_hi_u32 s4, s5, s7
-; GFX1250-NEXT:    s_add_co_u32 s0, s12, s15
-; GFX1250-NEXT:    s_add_co_ci_u32 s0, s13, s14
-; GFX1250-NEXT:    s_mul_i32 s6, s5, s7
-; GFX1250-NEXT:    s_add_co_ci_u32 s7, s4, 0
+; GFX1250-NEXT:    s_mul_hi_u32 s13, s8, s11
+; GFX1250-NEXT:    s_mul_i32 s12, s8, s11
+; GFX1250-NEXT:    s_mul_hi_u32 s6, s8, s10
+; GFX1250-NEXT:    s_mul_i32 s15, s9, s10
+; GFX1250-NEXT:    s_add_nc_u64 s[12:13], s[6:7], s[12:13]
+; GFX1250-NEXT:    s_mul_hi_u32 s14, s9, s10
+; GFX1250-NEXT:    s_mul_hi_u32 s16, s9, s11
+; GFX1250-NEXT:    s_add_co_u32 s6, s12, s15
+; GFX1250-NEXT:    s_add_co_ci_u32 s6, s13, s14
+; GFX1250-NEXT:    s_mul_i32 s10, s9, s11
+; GFX1250-NEXT:    s_add_co_ci_u32 s11, s16, 0
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_add_nc_u64 s[6:7], s[0:1], s[6:7]
-; GFX1250-NEXT:    v_add_co_u32 v0, s0, v0, s6
-; GFX1250-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX1250-NEXT:    s_add_co_ci_u32 s0, s5, s7
-; GFX1250-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX1250-NEXT:    s_mul_hi_u32 s5, s10, s0
-; GFX1250-NEXT:    s_mul_i32 s4, s10, s0
-; GFX1250-NEXT:    s_mul_hi_u32 s12, s11, s0
-; GFX1250-NEXT:    s_mul_i32 s6, s11, s0
-; GFX1250-NEXT:    s_mul_hi_u32 s0, s10, s7
-; GFX1250-NEXT:    s_mul_i32 s13, s11, s7
-; GFX1250-NEXT:    s_add_nc_u64 s[4:5], s[0:1], s[4:5]
-; GFX1250-NEXT:    s_mul_hi_u32 s0, s11, s7
-; GFX1250-NEXT:    s_add_co_u32 s4, s4, s13
-; GFX1250-NEXT:    s_add_co_ci_u32 s0, s5, s0
-; GFX1250-NEXT:    s_add_co_ci_u32 s7, s12, 0
+; GFX1250-NEXT:    s_add_nc_u64 s[10:11], s[6:7], s[10:11]
+; GFX1250-NEXT:    s_add_co_i32 s8, s8, s10
+; GFX1250-NEXT:    s_cselect_b32 s10, 1, 0
+; GFX1250-NEXT:    s_mul_hi_u32 s6, s2, s8
+; GFX1250-NEXT:    s_cmp_lg_u32 s10, 0
+; GFX1250-NEXT:    s_mul_hi_u32 s12, s3, s8
+; GFX1250-NEXT:    s_add_co_ci_u32 s10, s9, s11
+; GFX1250-NEXT:    s_mul_i32 s11, s3, s8
+; GFX1250-NEXT:    s_mul_hi_u32 s9, s2, s10
+; GFX1250-NEXT:    s_mul_i32 s8, s2, s10
+; GFX1250-NEXT:    s_mul_hi_u32 s13, s3, s10
+; GFX1250-NEXT:    s_add_nc_u64 s[8:9], s[6:7], s[8:9]
+; GFX1250-NEXT:    s_mul_i32 s10, s3, s10
+; GFX1250-NEXT:    s_add_co_u32 s6, s8, s11
+; GFX1250-NEXT:    s_add_co_ci_u32 s6, s9, s12
+; GFX1250-NEXT:    s_add_co_ci_u32 s11, s13, 0
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_add_nc_u64 s[4:5], s[0:1], s[6:7]
-; GFX1250-NEXT:    s_and_b64 s[6:7], s[4:5], lit64(0xffffffff00000000)
+; GFX1250-NEXT:    s_add_nc_u64 s[8:9], s[6:7], s[10:11]
+; GFX1250-NEXT:    s_and_b64 s[10:11], s[8:9], lit64(0xffffffff00000000)
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_or_b32 s6, s6, s4
-; GFX1250-NEXT:    s_mul_u64 s[4:5], s[2:3], s[6:7]
-; GFX1250-NEXT:    s_add_nc_u64 s[14:15], s[6:7], 2
-; GFX1250-NEXT:    v_sub_co_u32 v0, s0, s10, s4
-; GFX1250-NEXT:    s_sub_co_i32 s4, s11, s5
-; GFX1250-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX1250-NEXT:    v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
-; GFX1250-NEXT:    v_sub_co_u32 v1, s12, v0, s2
-; GFX1250-NEXT:    s_sub_co_ci_u32 s4, s4, s3
-; GFX1250-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX1250-NEXT:    s_add_nc_u64 s[12:13], s[6:7], 1
-; GFX1250-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v1
-; GFX1250-NEXT:    s_sub_co_ci_u32 s4, s4, 0
+; GFX1250-NEXT:    s_or_b32 s10, s10, s8
+; GFX1250-NEXT:    s_mul_u64 s[8:9], s[4:5], s[10:11]
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_cmp_ge_u32 s4, s3
-; GFX1250-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX1250-NEXT:    s_sub_co_i32 s6, s2, s8
+; GFX1250-NEXT:    s_cselect_b32 s8, 1, 0
+; GFX1250-NEXT:    s_sub_co_i32 s12, s3, s9
+; GFX1250-NEXT:    s_cmp_lg_u32 s8, 0
+; GFX1250-NEXT:    s_sub_co_ci_u32 s12, s12, s5
+; GFX1250-NEXT:    s_sub_co_i32 s13, s6, s4
+; GFX1250-NEXT:    s_cselect_b32 s14, 1, 0
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX1250-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX1250-NEXT:    s_sub_co_ci_u32 s12, s12, 0
+; GFX1250-NEXT:    s_cmp_ge_u32 s12, s5
 ; GFX1250-NEXT:    s_cselect_b32 s14, -1, 0
-; GFX1250-NEXT:    s_cmp_eq_u32 s4, s3
-; GFX1250-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX1250-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s14, v1, vcc_lo
-; GFX1250-NEXT:    v_cmp_le_u32_e32 vcc_lo, s2, v0
-; GFX1250-NEXT:    s_sub_co_ci_u32 s0, s11, s5
+; GFX1250-NEXT:    s_cmp_ge_u32 s13, s4
+; GFX1250-NEXT:    s_cselect_b32 s15, -1, 0
+; GFX1250-NEXT:    s_cmp_eq_u32 s12, s5
+; GFX1250-NEXT:    s_add_nc_u64 s[12:13], s[10:11], 1
+; GFX1250-NEXT:    s_cselect_b32 s16, s15, s14
+; GFX1250-NEXT:    s_add_nc_u64 s[14:15], s[10:11], 2
+; GFX1250-NEXT:    s_cmp_lg_u32 s16, 0
+; GFX1250-NEXT:    s_cselect_b32 s12, s14, s12
+; GFX1250-NEXT:    s_cselect_b32 s13, s15, s13
+; GFX1250-NEXT:    s_cmp_lg_u32 s8, 0
+; GFX1250-NEXT:    s_sub_co_ci_u32 s3, s3, s9
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_cmp_ge_u32 s0, s3
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1250-NEXT:    s_cselect_b32 s4, -1, 0
-; GFX1250-NEXT:    s_cmp_eq_u32 s0, s3
-; GFX1250-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX1250-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, s4, v0, s0
-; GFX1250-NEXT:    v_cndmask_b32_e32 v2, s12, v2, vcc_lo
-; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s13, v3, vcc_lo
-; GFX1250-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX1250-NEXT:    v_cndmask_b32_e32 v1, s7, v1, vcc_lo
-; GFX1250-NEXT:    v_cndmask_b32_e32 v0, s6, v2, vcc_lo
+; GFX1250-NEXT:    s_cmp_ge_u32 s3, s5
+; GFX1250-NEXT:    s_cselect_b32 s8, -1, 0
+; GFX1250-NEXT:    s_cmp_ge_u32 s6, s4
+; GFX1250-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX1250-NEXT:    s_cmp_eq_u32 s3, s5
+; GFX1250-NEXT:    s_cselect_b32 s3, s6, s8
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    s_cmp_lg_u32 s3, 0
+; GFX1250-NEXT:    s_cselect_b32 s9, s13, s11
+; GFX1250-NEXT:    s_cselect_b32 s8, s12, s10
 ; GFX1250-NEXT:    s_cbranch_execnz .LBB16_3
 ; GFX1250-NEXT:  .LBB16_2:
-; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GFX1250-NEXT:    s_sub_co_i32 s1, 0, s2
+; GFX1250-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GFX1250-NEXT:    s_sub_co_i32 s5, 0, s4
+; GFX1250-NEXT:    s_mov_b32 s9, 0
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(TRANS32_DEP_1)
 ; GFX1250-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX1250-NEXT:    v_nop
 ; GFX1250-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX1250-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1250-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1250-NEXT:    s_mul_i32 s1, s1, s0
+; GFX1250-NEXT:    v_readfirstlane_b32 s3, v0
+; GFX1250-NEXT:    s_mul_i32 s5, s5, s3
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1250-NEXT:    s_add_co_i32 s0, s0, s1
+; GFX1250-NEXT:    s_mul_hi_u32 s5, s3, s5
+; GFX1250-NEXT:    s_add_co_i32 s3, s3, s5
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_mul_hi_u32 s0, s10, s0
-; GFX1250-NEXT:    s_mul_i32 s1, s0, s2
-; GFX1250-NEXT:    s_add_co_i32 s3, s0, 1
-; GFX1250-NEXT:    s_sub_co_i32 s1, s10, s1
+; GFX1250-NEXT:    s_mul_hi_u32 s3, s2, s3
+; GFX1250-NEXT:    s_mul_i32 s5, s3, s4
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_sub_co_i32 s4, s1, s2
-; GFX1250-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1250-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1250-NEXT:    s_cselect_b32 s1, s4, s1
-; GFX1250-NEXT:    s_add_co_i32 s3, s0, 1
-; GFX1250-NEXT:    s_cmp_ge_u32 s1, s2
-; GFX1250-NEXT:    s_mov_b32 s1, 0
-; GFX1250-NEXT:    s_cselect_b32 s0, s3, s0
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1250-NEXT:    v_mov_b64_e32 v[0:1], s[0:1]
+; GFX1250-NEXT:    s_sub_co_i32 s2, s2, s5
+; GFX1250-NEXT:    s_add_co_i32 s5, s3, 1
+; GFX1250-NEXT:    s_sub_co_i32 s6, s2, s4
+; GFX1250-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX1250-NEXT:    s_cselect_b32 s3, s5, s3
+; GFX1250-NEXT:    s_cselect_b32 s2, s6, s2
+; GFX1250-NEXT:    s_add_co_i32 s5, s3, 1
+; GFX1250-NEXT:    s_cmp_ge_u32 s2, s4
+; GFX1250-NEXT:    s_cselect_b32 s8, s5, s3
 ; GFX1250-NEXT:  .LBB16_3:
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1250-NEXT:    v_mov_b64_e32 v[0:1], s[8:9]
 ; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[8:9]
+; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX1250-NEXT:    s_endpgm
 ; GFX1250-NEXT:  .LBB16_4:
-; GFX1250-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX1250-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; GFX1250-NEXT:    s_branch .LBB16_2
   %result = udiv i64 %x, %y
   store i64 %result, ptr addrspace(1) %out
diff --git a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
index e6f02295e67d5..eee9715f8de5b 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
+++ b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
@@ -11,17 +11,17 @@ define i32 @s_add_co_select_user() {
 ; GFX7-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX7-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-NEXT:    v_add_i32_e64 v0, s[4:5], s6, s6
+; GFX7-NEXT:    s_add_i32 s7, s6, s6
+; GFX7-NEXT:    s_cselect_b64 s[4:5], 1, 0
 ; GFX7-NEXT:    s_or_b32 s4, s4, s5
 ; GFX7-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX7-NEXT:    s_addc_u32 s7, s6, 0
+; GFX7-NEXT:    s_addc_u32 s8, s6, 0
 ; GFX7-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX7-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GFX7-NEXT:    s_cselect_b32 s4, s7, 0
+; GFX7-NEXT:    s_cselect_b32 s4, s8, 0
 ; GFX7-NEXT:    s_cmp_gt_u32 s6, 31
-; GFX7-NEXT:    v_mov_b32_e32 v1, s4
-; GFX7-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX7-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX7-NEXT:    s_cselect_b32 s4, s7, s4
+; GFX7-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX7-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: s_add_co_select_user:
@@ -30,16 +30,16 @@ define i32 @s_add_co_select_user() {
 ; GFX9-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e64 v0, s[4:5], s6, s6
+; GFX9-NEXT:    s_add_i32 s7, s6, s6
+; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
 ; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s7, s6, 0
+; GFX9-NEXT:    s_addc_u32 s8, s6, 0
 ; GFX9-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX9-NEXT:    s_and_b64 s[4:5], s[4:5], exec
-; GFX9-NEXT:    s_cselect_b32 s4, s7, 0
+; GFX9-NEXT:    s_cselect_b32 s4, s8, 0
 ; GFX9-NEXT:    s_cmp_gt_u32 s6, 31
-; GFX9-NEXT:    v_mov_b32_e32 v1, s4
-; GFX9-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT:    s_cselect_b32 s4, s7, s4
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: s_add_co_select_user:
@@ -48,15 +48,16 @@ define i32 @s_add_co_select_user() {
 ; GFX10-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX10-NEXT:    s_load_dword s4, s[4:5], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v0, s5, s4, s4
-; GFX10-NEXT:    s_cmp_lg_u32 s5, 0
-; GFX10-NEXT:    s_addc_u32 s5, s4, 0
-; GFX10-NEXT:    s_cselect_b32 s6, -1, 0
-; GFX10-NEXT:    s_and_b32 s6, s6, exec_lo
-; GFX10-NEXT:    s_cselect_b32 s5, s5, 0
+; GFX10-NEXT:    s_add_i32 s5, s4, s4
+; GFX10-NEXT:    s_cselect_b32 s6, 1, 0
+; GFX10-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX10-NEXT:    s_addc_u32 s6, s4, 0
+; GFX10-NEXT:    s_cselect_b32 s7, -1, 0
+; GFX10-NEXT:    s_and_b32 s7, s7, exec_lo
+; GFX10-NEXT:    s_cselect_b32 s6, s6, 0
 ; GFX10-NEXT:    s_cmp_gt_u32 s4, 31
-; GFX10-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX10-NEXT:    v_cndmask_b32_e32 v0, s5, v0, vcc_lo
+; GFX10-NEXT:    s_cselect_b32 s4, s5, s6
+; GFX10-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: s_add_co_select_user:
@@ -65,16 +66,18 @@ define i32 @s_add_co_select_user() {
 ; GFX11-NEXT:    s_mov_b64 s[0:1], 0
 ; GFX11-NEXT:    s_load_b32 s0, s[0:1], 0x0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_add_co_u32 v0, s1, s0, s0
-; GFX11-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX11-NEXT:    s_addc_u32 s1, s0, 0
-; GFX11-NEXT:    s_cselect_b32 s2, -1, 0
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_and_b32 s2, s2, exec_lo
-; GFX11-NEXT:    s_cselect_b32 s1, s1, 0
+; GFX11-NEXT:    s_add_i32 s1, s0, s0
+; GFX11-NEXT:    s_cselect_b32 s2, 1, 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_cmp_lg_u32 s2, 0
+; GFX11-NEXT:    s_addc_u32 s2, s0, 0
+; GFX11-NEXT:    s_cselect_b32 s3, -1, 0
+; GFX11-NEXT:    s_and_b32 s3, s3, exec_lo
+; GFX11-NEXT:    s_cselect_b32 s2, s2, 0
 ; GFX11-NEXT:    s_cmp_gt_u32 s0, 31
-; GFX11-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX11-NEXT:    v_cndmask_b32_e32 v0, s1, v0, vcc_lo
+; GFX11-NEXT:    s_cselect_b32 s0, s1, s2
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 bb:
   %i = load volatile i32, ptr addrspace(4) null, align 8
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 18b6138f02109..1111baca8dce5 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -6,8 +6,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_sdiv:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xd
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i32 s8, s1, 31
 ; GCN-NEXT:    s_add_u32 s0, s0, s8
@@ -16,126 +17,158 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[8:9]
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s11
-; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT:    s_sub_u32 s4, 0, s10
-; GCN-NEXT:    s_subb_u32 s5, 0, s11
+; GCN-NEXT:    s_sub_u32 s12, 0, s10
+; GCN-NEXT:    s_subb_u32 s13, 0, s11
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s12, s3, 31
-; GCN-NEXT:    s_add_u32 s2, s2, s12
-; GCN-NEXT:    s_mov_b32 s13, s12
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    s_addc_u32 s3, s3, s12
-; GCN-NEXT:    s_xor_b64 s[2:3], s[2:3], s[12:13]
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s2, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s3, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s3, v1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s3, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s3, v0
-; GCN-NEXT:    s_mov_b32 s4, s0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s11, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s11
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s10, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, s3, v2
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, s2, v3
-; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s10, v3
-; GCN-NEXT:    v_subbrev_u32_e64 v4, s[0:1], 0, v4, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s11, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s10, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s11, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v5, v7, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, v6, v8, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v6, s3
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, v6, v2, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s11, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s10, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s11, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
-; GCN-NEXT:    s_xor_b64 s[0:1], s[12:13], s[8:9]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s0, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s1, v1
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v2, s12, v0
+; GCN-NEXT:    v_readfirstlane_b32 s14, v1
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s1, s12, s14
+; GCN-NEXT:    v_readfirstlane_b32 s17, v2
+; GCN-NEXT:    s_mul_i32 s15, s13, s0
+; GCN-NEXT:    s_mul_i32 s16, s12, s0
+; GCN-NEXT:    s_add_i32 s1, s17, s1
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s16
+; GCN-NEXT:    s_add_i32 s1, s1, s15
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s16
+; GCN-NEXT:    v_readfirstlane_b32 s15, v3
+; GCN-NEXT:    s_mul_i32 s17, s0, s1
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s1
+; GCN-NEXT:    s_add_u32 s15, s15, s17
+; GCN-NEXT:    v_readfirstlane_b32 s17, v0
+; GCN-NEXT:    s_addc_u32 s17, 0, s17
+; GCN-NEXT:    s_mul_i32 s16, s14, s16
+; GCN-NEXT:    v_readfirstlane_b32 s18, v4
+; GCN-NEXT:    s_add_u32 s15, s15, s16
+; GCN-NEXT:    s_addc_u32 s15, s17, s18
+; GCN-NEXT:    v_readfirstlane_b32 s16, v1
+; GCN-NEXT:    s_addc_u32 s16, s16, 0
+; GCN-NEXT:    s_mul_i32 s1, s14, s1
+; GCN-NEXT:    s_add_u32 s1, s15, s1
+; GCN-NEXT:    s_addc_u32 s15, 0, s16
+; GCN-NEXT:    s_add_i32 s16, s0, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s16
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s12, v0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_addc_u32 s14, s14, s15
+; GCN-NEXT:    s_mul_i32 s0, s12, s14
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_add_i32 s0, s1, s0
+; GCN-NEXT:    s_mul_i32 s13, s13, s16
+; GCN-NEXT:    s_mul_i32 s1, s12, s16
+; GCN-NEXT:    s_add_i32 s0, s0, s13
 ; GCN-NEXT:    v_mov_b32_e32 v2, s1
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mul_hi_u32 v3, s14, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s16, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s14, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s16, v0
+; GCN-NEXT:    s_mul_i32 s13, s16, s0
+; GCN-NEXT:    v_readfirstlane_b32 s17, v2
+; GCN-NEXT:    s_add_u32 s13, s17, s13
+; GCN-NEXT:    v_readfirstlane_b32 s15, v0
+; GCN-NEXT:    s_mul_i32 s1, s14, s1
+; GCN-NEXT:    s_addc_u32 s15, 0, s15
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_add_u32 s1, s13, s1
+; GCN-NEXT:    s_addc_u32 s1, s15, s12
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    s_addc_u32 s12, s12, 0
+; GCN-NEXT:    s_mul_i32 s0, s14, s0
+; GCN-NEXT:    s_add_u32 s0, s1, s0
+; GCN-NEXT:    s_addc_u32 s12, 0, s12
+; GCN-NEXT:    s_add_i32 s15, s16, s0
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_addc_u32 s14, s14, s12
+; GCN-NEXT:    s_ashr_i32 s12, s7, 31
+; GCN-NEXT:    s_add_u32 s0, s6, s12
+; GCN-NEXT:    s_mov_b32 s13, s12
+; GCN-NEXT:    s_addc_u32 s1, s7, s12
+; GCN-NEXT:    s_xor_b64 s[6:7], s[0:1], s[12:13]
+; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GCN-NEXT:    v_mov_b32_e32 v2, s15
+; GCN-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    v_readfirstlane_b32 s4, v1
+; GCN-NEXT:    v_mul_hi_u32 v1, s7, v2
+; GCN-NEXT:    s_mul_i32 s1, s6, s14
+; GCN-NEXT:    v_readfirstlane_b32 s16, v3
+; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GCN-NEXT:    s_add_u32 s1, s16, s1
+; GCN-NEXT:    s_addc_u32 s4, 0, s4
+; GCN-NEXT:    s_mul_i32 s15, s7, s15
+; GCN-NEXT:    v_readfirstlane_b32 s16, v1
+; GCN-NEXT:    s_add_u32 s1, s1, s15
+; GCN-NEXT:    s_addc_u32 s1, s4, s16
+; GCN-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NEXT:    s_addc_u32 s4, s4, 0
+; GCN-NEXT:    s_mul_i32 s14, s7, s14
+; GCN-NEXT:    s_add_u32 s14, s1, s14
+; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT:    s_addc_u32 s15, 0, s4
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_mul_i32 s4, s10, s15
+; GCN-NEXT:    v_readfirstlane_b32 s5, v0
+; GCN-NEXT:    s_add_i32 s4, s5, s4
+; GCN-NEXT:    s_mul_i32 s5, s11, s14
+; GCN-NEXT:    s_add_i32 s16, s4, s5
+; GCN-NEXT:    s_sub_i32 s17, s7, s16
+; GCN-NEXT:    s_mul_i32 s4, s10, s14
+; GCN-NEXT:    s_sub_i32 s6, s6, s4
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s18, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s18, 0
+; GCN-NEXT:    s_subb_u32 s17, s17, s11
+; GCN-NEXT:    s_sub_i32 s19, s6, s10
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_subb_u32 s4, s17, 0
+; GCN-NEXT:    s_cmp_ge_u32 s4, s11
+; GCN-NEXT:    s_cselect_b32 s5, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s19, s10
+; GCN-NEXT:    s_cselect_b32 s17, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s4, s11
+; GCN-NEXT:    s_cselect_b32 s4, s17, s5
+; GCN-NEXT:    s_add_u32 s5, s14, 1
+; GCN-NEXT:    s_addc_u32 s17, s15, 0
+; GCN-NEXT:    s_add_u32 s19, s14, 2
+; GCN-NEXT:    s_addc_u32 s20, s15, 0
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_cselect_b32 s4, s19, s5
+; GCN-NEXT:    s_cselect_b32 s5, s20, s17
+; GCN-NEXT:    s_cmp_lg_u32 s18, 0
+; GCN-NEXT:    s_subb_u32 s7, s7, s16
+; GCN-NEXT:    s_cmp_ge_u32 s7, s11
+; GCN-NEXT:    s_cselect_b32 s16, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s6, s10
+; GCN-NEXT:    s_cselect_b32 s6, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s7, s11
+; GCN-NEXT:    s_cselect_b32 s6, s6, s16
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_cselect_b32 s5, s5, s15
+; GCN-NEXT:    s_cselect_b32 s4, s4, s14
+; GCN-NEXT:    s_xor_b64 s[6:7], s[12:13], s[8:9]
+; GCN-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
+; GCN-NEXT:    s_sub_u32 s4, s4, s6
+; GCN-NEXT:    s_subb_u32 s5, s5, s7
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_sdiv:
@@ -1110,116 +1143,145 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-LABEL: s_test_sdiv_k_num_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i32 s8, s3, 31
-; GCN-NEXT:    s_add_u32 s2, s2, s8
-; GCN-NEXT:    s_mov_b32 s9, s8
-; GCN-NEXT:    s_addc_u32 s3, s3, s8
-; GCN-NEXT:    s_xor_b64 s[2:3], s[2:3], s[8:9]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GCN-NEXT:    s_sub_u32 s4, 0, s2
-; GCN-NEXT:    s_subb_u32 s5, 0, s3
+; GCN-NEXT:    s_ashr_i32 s4, s3, 31
+; GCN-NEXT:    s_add_u32 s2, s2, s4
+; GCN-NEXT:    s_mov_b32 s5, s4
+; GCN-NEXT:    s_addc_u32 s3, s3, s4
+; GCN-NEXT:    s_xor_b64 s[6:7], s[2:3], s[4:5]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GCN-NEXT:    s_sub_u32 s2, 0, s6
+; GCN-NEXT:    s_subb_u32 s10, 0, s7
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, 24
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
-; GCN-NEXT:    v_mov_b32_e32 v4, s3
-; GCN-NEXT:    s_mov_b32 s4, s0
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_mul_hi_u32 v2, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v0
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 24, v2
-; GCN-NEXT:    v_subb_u32_e64 v3, s[0:1], v3, v4, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s2, v2
-; GCN-NEXT:    v_subbrev_u32_e64 v3, s[0:1], 0, v3, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v5, v4, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v4, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v5, s[0:1], 0, 0, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v6, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v7, s[0:1], 0, 0, s[0:1]
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v6, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v5, v7, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s8, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s8, v1
-; GCN-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s8, v0
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-NEXT:    v_readfirstlane_b32 s11, v1
+; GCN-NEXT:    v_readfirstlane_b32 s8, v0
+; GCN-NEXT:    s_mul_i32 s9, s2, s11
+; GCN-NEXT:    v_readfirstlane_b32 s14, v2
+; GCN-NEXT:    s_mul_i32 s12, s10, s8
+; GCN-NEXT:    s_mul_i32 s13, s2, s8
+; GCN-NEXT:    s_add_i32 s9, s14, s9
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s13
+; GCN-NEXT:    s_add_i32 s9, s9, s12
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s9
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s13
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_mul_i32 s15, s8, s9
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s9
+; GCN-NEXT:    s_add_u32 s12, s12, s15
+; GCN-NEXT:    v_readfirstlane_b32 s15, v0
+; GCN-NEXT:    s_mul_i32 s13, s11, s13
+; GCN-NEXT:    s_addc_u32 s15, 0, s15
+; GCN-NEXT:    v_readfirstlane_b32 s14, v4
+; GCN-NEXT:    s_add_u32 s12, s12, s13
+; GCN-NEXT:    s_addc_u32 s12, s15, s14
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-NEXT:    s_mul_i32 s9, s11, s9
+; GCN-NEXT:    s_add_u32 s9, s12, s9
+; GCN-NEXT:    s_addc_u32 s12, 0, s13
+; GCN-NEXT:    s_add_i32 s13, s8, s9
+; GCN-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    s_or_b32 s8, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_addc_u32 s11, s11, s12
+; GCN-NEXT:    s_mul_i32 s8, s2, s11
+; GCN-NEXT:    v_readfirstlane_b32 s9, v0
+; GCN-NEXT:    s_add_i32 s8, s9, s8
+; GCN-NEXT:    s_mul_i32 s10, s10, s13
+; GCN-NEXT:    s_mul_i32 s2, s2, s13
+; GCN-NEXT:    s_add_i32 s8, s8, s10
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v3, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s13, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s11, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s13, v0
+; GCN-NEXT:    s_mul_i32 s10, s13, s8
+; GCN-NEXT:    v_readfirstlane_b32 s14, v2
+; GCN-NEXT:    s_add_u32 s10, s14, s10
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_mul_i32 s2, s11, s2
+; GCN-NEXT:    s_addc_u32 s12, 0, s12
+; GCN-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NEXT:    s_add_u32 s2, s10, s2
+; GCN-NEXT:    s_addc_u32 s2, s12, s9
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_mul_i32 s8, s11, s8
+; GCN-NEXT:    s_add_u32 s2, s2, s8
+; GCN-NEXT:    s_addc_u32 s10, 0, s9
+; GCN-NEXT:    s_add_i32 s13, s13, s2
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    s_or_b32 s2, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s2, 0
+; GCN-NEXT:    s_addc_u32 s8, s11, s10
+; GCN-NEXT:    v_mul_hi_u32 v1, s13, 24
+; GCN-NEXT:    v_mul_hi_u32 v0, s8, 24
+; GCN-NEXT:    s_mul_i32 s8, s8, 24
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    v_readfirstlane_b32 s9, v0
+; GCN-NEXT:    s_add_u32 s8, s10, s8
+; GCN-NEXT:    s_addc_u32 s10, 0, s9
+; GCN-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NEXT:    v_mul_hi_u32 v0, s6, v0
+; GCN-NEXT:    s_mul_i32 s8, s7, s10
+; GCN-NEXT:    v_readfirstlane_b32 s9, v0
+; GCN-NEXT:    s_add_i32 s11, s9, s8
+; GCN-NEXT:    s_sub_i32 s12, 0, s11
+; GCN-NEXT:    s_mul_i32 s8, s6, s10
+; GCN-NEXT:    s_sub_i32 s13, 24, s8
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    s_or_b32 s14, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-NEXT:    s_subb_u32 s12, s12, s7
+; GCN-NEXT:    s_sub_i32 s15, s13, s6
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    s_or_b32 s8, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_subb_u32 s8, s12, 0
+; GCN-NEXT:    s_cmp_ge_u32 s8, s7
+; GCN-NEXT:    s_cselect_b32 s9, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s15, s6
+; GCN-NEXT:    s_cselect_b32 s12, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s8, s7
+; GCN-NEXT:    s_cselect_b32 s8, s12, s9
+; GCN-NEXT:    s_add_u32 s9, s10, 1
+; GCN-NEXT:    s_addc_u32 s12, 0, 0
+; GCN-NEXT:    s_add_u32 s15, s10, 2
+; GCN-NEXT:    s_addc_u32 s16, 0, 0
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_cselect_b32 s8, s15, s9
+; GCN-NEXT:    s_cselect_b32 s9, s16, s12
+; GCN-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-NEXT:    s_subb_u32 s11, 0, s11
+; GCN-NEXT:    s_cmp_ge_u32 s11, s7
+; GCN-NEXT:    s_cselect_b32 s12, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s6
+; GCN-NEXT:    s_cselect_b32 s6, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s11, s7
+; GCN-NEXT:    s_cselect_b32 s6, s6, s12
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_cselect_b32 s7, s9, 0
+; GCN-NEXT:    s_cselect_b32 s6, s8, s10
+; GCN-NEXT:    s_xor_b64 s[6:7], s[6:7], s[4:5]
+; GCN-NEXT:    s_sub_u32 s6, s6, s4
+; GCN-NEXT:    s_subb_u32 s7, s7, s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_sdiv_k_num_i64:
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index f614f58d8e1dc..fce960038444a 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -1491,29 +1491,29 @@ define amdgpu_kernel void @srem_v4i32_4(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in) {
 ; GCN-LABEL: srem_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    global_load_dwordx4 v[0:3], v0, s[10:11]
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v0, s[2:3]
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_readfirstlane_b32 s7, v1
-; GCN-NEXT:    v_readfirstlane_b32 s6, v0
-; GCN-NEXT:    v_readfirstlane_b32 s5, v3
-; GCN-NEXT:    v_readfirstlane_b32 s4, v2
-; GCN-NEXT:    s_or_b64 s[0:1], s[6:7], s[4:5]
-; GCN-NEXT:    s_mov_b32 s0, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT:    v_readfirstlane_b32 s5, v1
+; GCN-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NEXT:    v_readfirstlane_b32 s3, v3
+; GCN-NEXT:    v_readfirstlane_b32 s2, v2
+; GCN-NEXT:    s_or_b64 s[6:7], s[4:5], s[2:3]
+; GCN-NEXT:    s_mov_b32 s6, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[6:7], 0
 ; GCN-NEXT:    s_cbranch_scc0 .LBB8_4
 ; GCN-NEXT:  ; %bb.1:
-; GCN-NEXT:    s_ashr_i32 s0, s5, 31
-; GCN-NEXT:    s_add_u32 s2, s4, s0
-; GCN-NEXT:    s_mov_b32 s1, s0
-; GCN-NEXT:    s_addc_u32 s3, s5, s0
-; GCN-NEXT:    s_xor_b64 s[12:13], s[2:3], s[0:1]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s12
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s13
-; GCN-NEXT:    s_sub_u32 s0, 0, s12
-; GCN-NEXT:    s_subb_u32 s1, 0, s13
+; GCN-NEXT:    s_ashr_i32 s6, s3, 31
+; GCN-NEXT:    s_add_u32 s8, s2, s6
+; GCN-NEXT:    s_mov_b32 s7, s6
+; GCN-NEXT:    s_addc_u32 s9, s3, s6
+; GCN-NEXT:    s_xor_b64 s[8:9], s[8:9], s[6:7]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
+; GCN-NEXT:    s_sub_u32 s3, 0, s8
+; GCN-NEXT:    s_subb_u32 s12, 0, s9
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -1522,155 +1522,148 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s2, v1
-; GCN-NEXT:    v_readfirstlane_b32 s3, v0
-; GCN-NEXT:    s_mul_i32 s5, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s15, s0, s3
-; GCN-NEXT:    s_mul_i32 s14, s1, s3
-; GCN-NEXT:    s_add_i32 s5, s15, s5
-; GCN-NEXT:    s_add_i32 s5, s5, s14
-; GCN-NEXT:    s_mul_i32 s16, s0, s3
-; GCN-NEXT:    s_mul_hi_u32 s14, s3, s5
-; GCN-NEXT:    s_mul_i32 s15, s3, s5
-; GCN-NEXT:    s_mul_hi_u32 s3, s3, s16
-; GCN-NEXT:    s_add_u32 s3, s3, s15
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_mul_i32 s11, s3, s13
+; GCN-NEXT:    s_mul_hi_u32 s15, s3, s10
+; GCN-NEXT:    s_mul_i32 s14, s12, s10
+; GCN-NEXT:    s_add_i32 s11, s15, s11
+; GCN-NEXT:    s_add_i32 s11, s11, s14
+; GCN-NEXT:    s_mul_i32 s16, s3, s10
+; GCN-NEXT:    s_mul_i32 s15, s10, s11
+; GCN-NEXT:    s_mul_hi_u32 s17, s10, s16
+; GCN-NEXT:    s_mul_hi_u32 s14, s10, s11
+; GCN-NEXT:    s_add_u32 s15, s17, s15
 ; GCN-NEXT:    s_addc_u32 s14, 0, s14
-; GCN-NEXT:    s_mul_hi_u32 s17, s2, s16
-; GCN-NEXT:    s_mul_i32 s16, s2, s16
-; GCN-NEXT:    s_add_u32 s3, s3, s16
-; GCN-NEXT:    s_mul_hi_u32 s15, s2, s5
-; GCN-NEXT:    s_addc_u32 s3, s14, s17
-; GCN-NEXT:    s_addc_u32 s14, s15, 0
-; GCN-NEXT:    s_mul_i32 s5, s2, s5
-; GCN-NEXT:    s_add_u32 s3, s3, s5
+; GCN-NEXT:    s_mul_hi_u32 s18, s13, s16
+; GCN-NEXT:    s_mul_i32 s16, s13, s16
+; GCN-NEXT:    s_add_u32 s15, s15, s16
+; GCN-NEXT:    s_mul_hi_u32 s17, s13, s11
+; GCN-NEXT:    s_addc_u32 s14, s14, s18
+; GCN-NEXT:    s_addc_u32 s15, s17, 0
+; GCN-NEXT:    s_mul_i32 s11, s13, s11
+; GCN-NEXT:    s_add_u32 s11, s14, s11
+; GCN-NEXT:    s_addc_u32 s14, 0, s15
+; GCN-NEXT:    s_add_i32 s15, s10, s11
+; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GCN-NEXT:    s_addc_u32 s13, s13, s14
+; GCN-NEXT:    s_mul_i32 s10, s3, s13
+; GCN-NEXT:    s_mul_hi_u32 s11, s3, s15
+; GCN-NEXT:    s_add_i32 s10, s11, s10
+; GCN-NEXT:    s_mul_i32 s12, s12, s15
+; GCN-NEXT:    s_add_i32 s10, s10, s12
+; GCN-NEXT:    s_mul_i32 s3, s3, s15
+; GCN-NEXT:    s_mul_hi_u32 s12, s13, s3
+; GCN-NEXT:    s_mul_i32 s14, s13, s3
+; GCN-NEXT:    s_mul_i32 s17, s15, s10
+; GCN-NEXT:    s_mul_hi_u32 s3, s15, s3
+; GCN-NEXT:    s_mul_hi_u32 s16, s15, s10
+; GCN-NEXT:    s_add_u32 s3, s3, s17
+; GCN-NEXT:    s_addc_u32 s16, 0, s16
+; GCN-NEXT:    s_add_u32 s3, s3, s14
+; GCN-NEXT:    s_mul_hi_u32 s11, s13, s10
+; GCN-NEXT:    s_addc_u32 s3, s16, s12
+; GCN-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-NEXT:    s_mul_i32 s10, s13, s10
+; GCN-NEXT:    s_add_u32 s3, s3, s10
+; GCN-NEXT:    s_addc_u32 s12, 0, s11
+; GCN-NEXT:    s_add_i32 s15, s15, s3
+; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[10:11], 0
+; GCN-NEXT:    s_addc_u32 s3, s13, s12
+; GCN-NEXT:    s_ashr_i32 s10, s5, 31
+; GCN-NEXT:    s_add_u32 s12, s4, s10
+; GCN-NEXT:    s_mov_b32 s11, s10
+; GCN-NEXT:    s_addc_u32 s13, s5, s10
+; GCN-NEXT:    s_xor_b64 s[12:13], s[12:13], s[10:11]
+; GCN-NEXT:    s_mul_i32 s14, s12, s3
+; GCN-NEXT:    s_mul_hi_u32 s16, s12, s15
+; GCN-NEXT:    s_mul_hi_u32 s5, s12, s3
+; GCN-NEXT:    s_add_u32 s14, s16, s14
+; GCN-NEXT:    s_addc_u32 s5, 0, s5
+; GCN-NEXT:    s_mul_hi_u32 s17, s13, s15
+; GCN-NEXT:    s_mul_i32 s15, s13, s15
+; GCN-NEXT:    s_add_u32 s14, s14, s15
+; GCN-NEXT:    s_mul_hi_u32 s16, s13, s3
+; GCN-NEXT:    s_addc_u32 s5, s5, s17
+; GCN-NEXT:    s_addc_u32 s14, s16, 0
+; GCN-NEXT:    s_mul_i32 s3, s13, s3
+; GCN-NEXT:    s_add_u32 s3, s5, s3
 ; GCN-NEXT:    s_addc_u32 s5, 0, s14
-; GCN-NEXT:    v_add_co_u32_e32 v0, vcc, s3, v0
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s5
-; GCN-NEXT:    v_readfirstlane_b32 s5, v0
-; GCN-NEXT:    s_mul_i32 s3, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s14, s0, s5
-; GCN-NEXT:    s_add_i32 s3, s14, s3
-; GCN-NEXT:    s_mul_i32 s1, s1, s5
-; GCN-NEXT:    s_add_i32 s3, s3, s1
-; GCN-NEXT:    s_mul_i32 s0, s0, s5
-; GCN-NEXT:    s_mul_hi_u32 s14, s2, s0
-; GCN-NEXT:    s_mul_i32 s15, s2, s0
-; GCN-NEXT:    s_mul_i32 s17, s5, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s5, s0
-; GCN-NEXT:    s_mul_hi_u32 s16, s5, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s17
-; GCN-NEXT:    s_addc_u32 s5, 0, s16
-; GCN-NEXT:    s_add_u32 s0, s0, s15
-; GCN-NEXT:    s_mul_hi_u32 s1, s2, s3
-; GCN-NEXT:    s_addc_u32 s0, s5, s14
-; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_mul_i32 s3, s2, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s3
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s1
-; GCN-NEXT:    s_ashr_i32 s14, s7, 31
-; GCN-NEXT:    s_add_u32 s0, s6, s14
-; GCN-NEXT:    s_mov_b32 s15, s14
-; GCN-NEXT:    s_addc_u32 s1, s7, s14
-; GCN-NEXT:    s_xor_b64 s[16:17], s[0:1], s[14:15]
-; GCN-NEXT:    v_readfirstlane_b32 s3, v0
-; GCN-NEXT:    s_mul_i32 s1, s16, s2
-; GCN-NEXT:    s_mul_hi_u32 s5, s16, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s16, s2
-; GCN-NEXT:    s_add_u32 s1, s5, s1
-; GCN-NEXT:    s_addc_u32 s0, 0, s0
-; GCN-NEXT:    s_mul_hi_u32 s7, s17, s3
-; GCN-NEXT:    s_mul_i32 s3, s17, s3
-; GCN-NEXT:    s_add_u32 s1, s1, s3
-; GCN-NEXT:    s_mul_hi_u32 s5, s17, s2
-; GCN-NEXT:    s_addc_u32 s0, s0, s7
-; GCN-NEXT:    s_addc_u32 s1, s5, 0
-; GCN-NEXT:    s_mul_i32 s2, s17, s2
-; GCN-NEXT:    s_add_u32 s0, s0, s2
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    s_mul_i32 s1, s12, s1
-; GCN-NEXT:    s_mul_hi_u32 s2, s12, s0
-; GCN-NEXT:    s_add_i32 s1, s2, s1
-; GCN-NEXT:    s_mul_i32 s2, s13, s0
-; GCN-NEXT:    s_mul_i32 s0, s12, s0
-; GCN-NEXT:    s_add_i32 s5, s1, s2
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    s_sub_i32 s1, s17, s5
-; GCN-NEXT:    v_sub_co_u32_e32 v0, vcc, s16, v0
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_subb_u32 s7, s1, s13
-; GCN-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s12, v0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s15, s7, 0
-; GCN-NEXT:    s_cmp_ge_u32 s15, s13
-; GCN-NEXT:    s_cselect_b32 s16, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v1
-; GCN-NEXT:    s_cmp_eq_u32 s15, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v2, 0, -1, s[2:3]
-; GCN-NEXT:    v_mov_b32_e32 v3, s16
-; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[2:3]
-; GCN-NEXT:    s_subb_u32 s2, s7, s13
-; GCN-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s12, v1
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s2, s2, 0
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v2, s15
-; GCN-NEXT:    v_mov_b32_e32 v3, s2
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GCN-NEXT:    s_subb_u32 s0, s17, s5
-; GCN-NEXT:    s_cmp_ge_u32 s0, s13
-; GCN-NEXT:    s_cselect_b32 s1, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
-; GCN-NEXT:    s_cmp_eq_u32 s0, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s1
-; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_mov_b32_e32 v4, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s14, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s14, v2
-; GCN-NEXT:    v_mov_b32_e32 v2, s14
-; GCN-NEXT:    v_subrev_co_u32_e32 v0, vcc, s14, v0
-; GCN-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-NEXT:    s_mul_i32 s5, s8, s5
+; GCN-NEXT:    s_mul_hi_u32 s14, s8, s3
+; GCN-NEXT:    s_add_i32 s5, s14, s5
+; GCN-NEXT:    s_mul_i32 s14, s9, s3
+; GCN-NEXT:    s_add_i32 s5, s5, s14
+; GCN-NEXT:    s_sub_i32 s16, s13, s5
+; GCN-NEXT:    s_mul_i32 s3, s8, s3
+; GCN-NEXT:    s_sub_i32 s3, s12, s3
+; GCN-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GCN-NEXT:    s_subb_u32 s12, s16, s9
+; GCN-NEXT:    s_sub_i32 s18, s3, s8
+; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_subb_u32 s19, s12, 0
+; GCN-NEXT:    s_cmp_ge_u32 s19, s9
+; GCN-NEXT:    s_cselect_b32 s20, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s18, s8
+; GCN-NEXT:    s_cselect_b32 s21, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s19, s9
+; GCN-NEXT:    s_cselect_b32 s20, s21, s20
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_subb_u32 s12, s12, s9
+; GCN-NEXT:    s_sub_i32 s21, s18, s8
+; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_subb_u32 s12, s12, 0
+; GCN-NEXT:    s_cmp_lg_u32 s20, 0
+; GCN-NEXT:    s_cselect_b32 s16, s21, s18
+; GCN-NEXT:    s_cselect_b32 s12, s12, s19
+; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GCN-NEXT:    s_subb_u32 s5, s13, s5
+; GCN-NEXT:    s_cmp_ge_u32 s5, s9
+; GCN-NEXT:    s_cselect_b32 s13, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s3, s8
+; GCN-NEXT:    s_cselect_b32 s8, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s5, s9
+; GCN-NEXT:    s_cselect_b32 s8, s8, s13
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_cselect_b32 s9, s12, s5
+; GCN-NEXT:    s_cselect_b32 s8, s16, s3
+; GCN-NEXT:    s_xor_b64 s[8:9], s[8:9], s[10:11]
+; GCN-NEXT:    s_sub_u32 s8, s8, s10
+; GCN-NEXT:    s_subb_u32 s9, s9, s10
 ; GCN-NEXT:    s_cbranch_execnz .LBB8_3
 ; GCN-NEXT:  .LBB8_2:
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GCN-NEXT:    s_sub_i32 s0, 0, s4
-; GCN-NEXT:    s_mov_b32 s1, 0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    s_sub_i32 s3, 0, s2
+; GCN-NEXT:    s_mov_b32 s9, 0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s2, v0
-; GCN-NEXT:    s_mul_i32 s0, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s0, s2, s0
-; GCN-NEXT:    s_add_i32 s2, s2, s0
-; GCN-NEXT:    s_mul_hi_u32 s0, s6, s2
-; GCN-NEXT:    s_mul_i32 s0, s0, s4
-; GCN-NEXT:    s_sub_i32 s0, s6, s0
-; GCN-NEXT:    s_sub_i32 s2, s0, s4
-; GCN-NEXT:    s_cmp_ge_u32 s0, s4
-; GCN-NEXT:    s_cselect_b32 s0, s2, s0
-; GCN-NEXT:    s_sub_i32 s2, s0, s4
-; GCN-NEXT:    s_cmp_ge_u32 s0, s4
-; GCN-NEXT:    s_cselect_b32 s0, s2, s0
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_readfirstlane_b32 s5, v0
+; GCN-NEXT:    s_mul_i32 s3, s3, s5
+; GCN-NEXT:    s_mul_hi_u32 s3, s5, s3
+; GCN-NEXT:    s_add_i32 s5, s5, s3
+; GCN-NEXT:    s_mul_hi_u32 s3, s4, s5
+; GCN-NEXT:    s_mul_i32 s3, s3, s2
+; GCN-NEXT:    s_sub_i32 s3, s4, s3
+; GCN-NEXT:    s_sub_i32 s4, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s3, s4, s3
+; GCN-NEXT:    s_sub_i32 s4, s3, s2
+; GCN-NEXT:    s_cmp_ge_u32 s3, s2
+; GCN-NEXT:    s_cselect_b32 s8, s4, s3
 ; GCN-NEXT:  .LBB8_3:
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GCN-NEXT:    v_mov_b32_e32 v1, s9
+; GCN-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GCN-NEXT:    s_endpgm
 ; GCN-NEXT:  .LBB8_4:
-; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GCN-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; GCN-NEXT:    s_branch .LBB8_2
 ;
 ; TAHITI-LABEL: srem_i64:
@@ -1732,7 +1725,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; TAHITI-NEXT:    v_mul_lo_u32 v8, v8, v5
 ; TAHITI-NEXT:    v_mul_lo_u32 v7, v7, v5
 ; TAHITI-NEXT:    v_add_i32_e32 v9, vcc, v9, v10
-; TAHITI-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
+; TAHITI-NEXT:    v_add_i32_e32 v8, vcc, v8, v9
 ; TAHITI-NEXT:    v_mul_lo_u32 v11, v5, v8
 ; TAHITI-NEXT:    v_mul_hi_u32 v12, v5, v7
 ; TAHITI-NEXT:    v_mul_hi_u32 v13, v5, v8
@@ -1819,7 +1812,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; TAHITI-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; TAHITI-NEXT:    v_mul_lo_u32 v1, v1, v2
 ; TAHITI-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
-; TAHITI-NEXT:    v_subrev_i32_e32 v1, vcc, v2, v0
+; TAHITI-NEXT:    v_sub_i32_e32 v1, vcc, v0, v2
 ; TAHITI-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
 ; TAHITI-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
 ; TAHITI-NEXT:    v_sub_i32_e32 v1, vcc, v0, v2
@@ -1836,150 +1829,175 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ;
 ; TONGA-LABEL: srem_i64:
 ; TONGA:       ; %bb.0:
-; TONGA-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
-; TONGA-NEXT:    v_mov_b32_e32 v4, 0
+; TONGA-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; TONGA-NEXT:    s_waitcnt lgkmcnt(0)
-; TONGA-NEXT:    v_mov_b32_e32 v0, s6
-; TONGA-NEXT:    v_mov_b32_e32 v1, s7
+; TONGA-NEXT:    v_mov_b32_e32 v0, s2
+; TONGA-NEXT:    v_mov_b32_e32 v1, s3
 ; TONGA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
 ; TONGA-NEXT:    s_waitcnt vmcnt(0)
-; TONGA-NEXT:    v_or_b32_e32 v5, v1, v3
-; TONGA-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; TONGA-NEXT:    s_cbranch_vccz .LBB8_4
+; TONGA-NEXT:    v_readfirstlane_b32 s5, v1
+; TONGA-NEXT:    v_readfirstlane_b32 s4, v0
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v3
+; TONGA-NEXT:    v_readfirstlane_b32 s2, v2
+; TONGA-NEXT:    s_or_b64 s[6:7], s[4:5], s[2:3]
+; TONGA-NEXT:    s_mov_b32 s6, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; TONGA-NEXT:    s_cbranch_scc0 .LBB8_3
 ; TONGA-NEXT:  ; %bb.1:
-; TONGA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; TONGA-NEXT:    v_add_u32_e32 v5, vcc, v2, v4
-; TONGA-NEXT:    v_addc_u32_e32 v3, vcc, v3, v4, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v9, v5, v4
-; TONGA-NEXT:    v_xor_b32_e32 v10, v3, v4
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v3, v9
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v4, v10
-; TONGA-NEXT:    v_sub_u32_e32 v11, vcc, 0, v9
-; TONGA-NEXT:    v_subb_u32_e32 v12, vcc, 0, v10, vcc
-; TONGA-NEXT:    v_madmk_f32 v3, v4, 0x4f800000, v3
-; TONGA-NEXT:    v_rcp_f32_e32 v3, v3
-; TONGA-NEXT:    v_mul_f32_e32 v3, 0x5f7ffffc, v3
-; TONGA-NEXT:    v_mul_f32_e32 v4, 0x2f800000, v3
-; TONGA-NEXT:    v_trunc_f32_e32 v4, v4
-; TONGA-NEXT:    v_madmk_f32 v3, v4, 0xcf800000, v3
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v7, v4
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v8, v3
-; TONGA-NEXT:    v_mul_lo_u32 v5, v11, v7
-; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v11, v8, 0
-; TONGA-NEXT:    v_mul_lo_u32 v6, v12, v8
-; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v4, v5
-; TONGA-NEXT:    v_add_u32_e32 v6, vcc, v4, v6
-; TONGA-NEXT:    v_mad_u64_u32 v[4:5], s[0:1], v8, v6, 0
-; TONGA-NEXT:    v_mul_hi_u32 v13, v8, v3
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v13, v4
-; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v7, v3, 0
-; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, 0, v5, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[5:6], s[0:1], v7, v6, 0
-; TONGA-NEXT:    v_add_u32_e32 v3, vcc, v13, v3
-; TONGA-NEXT:    v_addc_u32_e32 v3, vcc, v14, v4, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
-; TONGA-NEXT:    v_add_u32_e32 v3, vcc, v3, v5
-; TONGA-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v8, v3
-; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, v7, v4, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v11, v13, 0
-; TONGA-NEXT:    v_mul_lo_u32 v7, v11, v14
-; TONGA-NEXT:    v_mul_lo_u32 v8, v12, v13
-; TONGA-NEXT:    v_mul_hi_u32 v11, v13, v3
-; TONGA-NEXT:    v_mad_u64_u32 v[5:6], s[0:1], v14, v3, 0
-; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v7, v4
-; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v4, v8
-; TONGA-NEXT:    v_mad_u64_u32 v[7:8], s[0:1], v13, v4, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v14, v4, 0
-; TONGA-NEXT:    v_add_u32_e32 v7, vcc, v11, v7
-; TONGA-NEXT:    v_addc_u32_e32 v8, vcc, 0, v8, vcc
-; TONGA-NEXT:    v_add_u32_e32 v5, vcc, v7, v5
-; TONGA-NEXT:    v_addc_u32_e32 v5, vcc, v8, v6, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; TONGA-NEXT:    v_add_u32_e32 v3, vcc, v5, v3
-; TONGA-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; TONGA-NEXT:    v_add_u32_e32 v5, vcc, v13, v3
-; TONGA-NEXT:    v_addc_u32_e32 v6, vcc, v14, v4, vcc
-; TONGA-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; TONGA-NEXT:    v_add_u32_e32 v3, vcc, v0, v7
-; TONGA-NEXT:    v_xor_b32_e32 v8, v3, v7
-; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v8, v6, 0
-; TONGA-NEXT:    v_mul_hi_u32 v11, v8, v5
-; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v1, v1, v7
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v11, v3
-; TONGA-NEXT:    v_addc_u32_e32 v12, vcc, 0, v4, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v1, v5, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[5:6], s[0:1], v1, v6, 0
-; TONGA-NEXT:    v_add_u32_e32 v3, vcc, v11, v3
-; TONGA-NEXT:    v_addc_u32_e32 v3, vcc, v12, v4, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v4, vcc, 0, v6, vcc
-; TONGA-NEXT:    v_add_u32_e32 v5, vcc, v3, v5
-; TONGA-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; TONGA-NEXT:    v_mul_lo_u32 v6, v9, v3
-; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v9, v5, 0
-; TONGA-NEXT:    v_mul_lo_u32 v5, v10, v5
-; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v6, v4
-; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v5, v4
-; TONGA-NEXT:    v_sub_u32_e32 v5, vcc, v1, v4
-; TONGA-NEXT:    v_sub_u32_e32 v3, vcc, v8, v3
-; TONGA-NEXT:    v_subb_u32_e64 v5, s[0:1], v5, v10, vcc
-; TONGA-NEXT:    v_sub_u32_e64 v6, s[0:1], v3, v9
-; TONGA-NEXT:    v_subbrev_u32_e64 v8, s[2:3], 0, v5, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v8, v10
-; TONGA-NEXT:    v_cndmask_b32_e64 v11, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v6, v9
-; TONGA-NEXT:    v_cndmask_b32_e64 v12, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_eq_u32_e64 s[2:3], v8, v10
-; TONGA-NEXT:    v_subb_u32_e64 v5, s[0:1], v5, v10, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e64 v11, v11, v12, s[2:3]
-; TONGA-NEXT:    v_sub_u32_e64 v12, s[0:1], v6, v9
-; TONGA-NEXT:    v_subb_u32_e32 v1, vcc, v1, v4, vcc
-; TONGA-NEXT:    v_subbrev_u32_e64 v5, s[0:1], 0, v5, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v10
-; TONGA-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v11
-; TONGA-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v3, v9
-; TONGA-NEXT:    v_cndmask_b32_e64 v5, v8, v5, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_eq_u32_e32 vcc, v1, v10
-; TONGA-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
-; TONGA-NEXT:    v_cndmask_b32_e64 v6, v6, v12, s[0:1]
-; TONGA-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; TONGA-NEXT:    v_cndmask_b32_e32 v3, v3, v6, vcc
-; TONGA-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v3, v3, v7
-; TONGA-NEXT:    v_xor_b32_e32 v1, v1, v7
-; TONGA-NEXT:    v_sub_u32_e32 v3, vcc, v3, v7
-; TONGA-NEXT:    v_subb_u32_e32 v4, vcc, v1, v7, vcc
-; TONGA-NEXT:    s_cbranch_execnz .LBB8_3
+; TONGA-NEXT:    s_ashr_i32 s6, s3, 31
+; TONGA-NEXT:    s_add_u32 s8, s2, s6
+; TONGA-NEXT:    s_mov_b32 s7, s6
+; TONGA-NEXT:    s_addc_u32 s9, s3, s6
+; TONGA-NEXT:    s_xor_b64 s[6:7], s[8:9], s[6:7]
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; TONGA-NEXT:    s_sub_u32 s3, 0, s6
+; TONGA-NEXT:    s_subb_u32 s10, 0, s7
+; TONGA-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; TONGA-NEXT:    v_rcp_f32_e32 v0, v0
+; TONGA-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; TONGA-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; TONGA-NEXT:    v_trunc_f32_e32 v1, v1
+; TONGA-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v4, v1
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v5, v0
+; TONGA-NEXT:    v_mul_lo_u32 v2, s3, v4
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s3, v5, 0
+; TONGA-NEXT:    v_mul_lo_u32 v3, s10, v5
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v1, v2
+; TONGA-NEXT:    v_add_u32_e32 v3, vcc, v1, v3
+; TONGA-NEXT:    v_mul_hi_u32 v6, v5, v0
+; TONGA-NEXT:    v_mad_u64_u32 v[1:2], s[8:9], v5, v3, 0
+; TONGA-NEXT:    v_add_u32_e32 v6, vcc, v6, v1
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v4, v0, 0
+; TONGA-NEXT:    v_addc_u32_e32 v7, vcc, 0, v2, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v4, v3, 0
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v6, v0
+; TONGA-NEXT:    v_addc_u32_e32 v0, vcc, v7, v1, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT:    v_add_u32_e32 v6, vcc, v5, v0
+; TONGA-NEXT:    v_addc_u32_e32 v7, vcc, v4, v1, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s3, v6, 0
+; TONGA-NEXT:    v_mul_lo_u32 v4, s3, v7
+; TONGA-NEXT:    v_mul_lo_u32 v5, s10, v6
+; TONGA-NEXT:    v_mul_hi_u32 v8, v6, v0
+; TONGA-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], v7, v0, 0
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v4, v1
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v5, v1
+; TONGA-NEXT:    v_mad_u64_u32 v[4:5], s[8:9], v6, v1, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v7, v1, 0
+; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v8, v4
+; TONGA-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; TONGA-NEXT:    v_add_u32_e32 v2, vcc, v4, v2
+; TONGA-NEXT:    v_addc_u32_e32 v2, vcc, v5, v3, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v2, v0
+; TONGA-NEXT:    s_ashr_i32 s10, s5, 31
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT:    s_add_u32 s8, s4, s10
+; TONGA-NEXT:    v_add_u32_e32 v2, vcc, v6, v0
+; TONGA-NEXT:    s_mov_b32 s11, s10
+; TONGA-NEXT:    s_addc_u32 s9, s5, s10
+; TONGA-NEXT:    v_addc_u32_e32 v3, vcc, v7, v1, vcc
+; TONGA-NEXT:    s_xor_b64 s[12:13], s[8:9], s[10:11]
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s12, v3, 0
+; TONGA-NEXT:    v_mul_hi_u32 v4, s12, v2
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v1
+; TONGA-NEXT:    v_readfirstlane_b32 s5, v0
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s13, v3, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[2:3], s[8:9], s13, v2, 0
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v4
+; TONGA-NEXT:    s_add_u32 s5, s14, s5
+; TONGA-NEXT:    s_addc_u32 s3, 0, s3
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v2
+; TONGA-NEXT:    v_readfirstlane_b32 s9, v3
+; TONGA-NEXT:    s_add_u32 s5, s5, s14
+; TONGA-NEXT:    v_readfirstlane_b32 s8, v1
+; TONGA-NEXT:    s_addc_u32 s3, s3, s9
+; TONGA-NEXT:    s_addc_u32 s5, s8, 0
+; TONGA-NEXT:    v_readfirstlane_b32 s8, v0
+; TONGA-NEXT:    s_add_u32 s3, s3, s8
+; TONGA-NEXT:    v_mov_b32_e32 v0, s3
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s6, v0, 0
+; TONGA-NEXT:    s_addc_u32 s5, 0, s5
+; TONGA-NEXT:    s_mul_i32 s5, s6, s5
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v1
+; TONGA-NEXT:    s_add_i32 s5, s14, s5
+; TONGA-NEXT:    s_mul_i32 s3, s7, s3
+; TONGA-NEXT:    s_add_i32 s5, s5, s3
+; TONGA-NEXT:    s_sub_i32 s3, s13, s5
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v0
+; TONGA-NEXT:    s_sub_i32 s12, s12, s14
+; TONGA-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; TONGA-NEXT:    s_subb_u32 s3, s3, s7
+; TONGA-NEXT:    s_sub_i32 s18, s12, s6
+; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s19, s3, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s19, s7
+; TONGA-NEXT:    s_cselect_b32 s20, -1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s18, s6
+; TONGA-NEXT:    s_cselect_b32 s21, -1, 0
+; TONGA-NEXT:    s_cmp_eq_u32 s19, s7
+; TONGA-NEXT:    s_cselect_b32 s20, s21, s20
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s3, s3, s7
+; TONGA-NEXT:    s_sub_i32 s21, s18, s6
+; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s3, s3, 0
+; TONGA-NEXT:    s_cmp_lg_u32 s20, 0
+; TONGA-NEXT:    s_cselect_b32 s16, s21, s18
+; TONGA-NEXT:    s_cselect_b32 s3, s3, s19
+; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; TONGA-NEXT:    s_subb_u32 s5, s13, s5
+; TONGA-NEXT:    s_cmp_ge_u32 s5, s7
+; TONGA-NEXT:    s_cselect_b32 s13, -1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s12, s6
+; TONGA-NEXT:    s_cselect_b32 s6, -1, 0
+; TONGA-NEXT:    s_cmp_eq_u32 s5, s7
+; TONGA-NEXT:    s_cselect_b32 s6, s6, s13
+; TONGA-NEXT:    s_cmp_lg_u32 s6, 0
+; TONGA-NEXT:    s_cselect_b32 s7, s3, s5
+; TONGA-NEXT:    s_cselect_b32 s6, s16, s12
+; TONGA-NEXT:    s_xor_b64 s[6:7], s[6:7], s[10:11]
+; TONGA-NEXT:    s_sub_u32 s6, s6, s10
+; TONGA-NEXT:    s_subb_u32 s7, s7, s10
+; TONGA-NEXT:    s_cbranch_execnz .LBB8_4
 ; TONGA-NEXT:  .LBB8_2:
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v1, v2
-; TONGA-NEXT:    v_sub_u32_e32 v3, vcc, 0, v2
-; TONGA-NEXT:    v_mov_b32_e32 v4, 0
-; TONGA-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; TONGA-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; TONGA-NEXT:    v_mul_lo_u32 v3, v3, v1
-; TONGA-NEXT:    v_mul_hi_u32 v3, v1, v3
-; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v1, v3
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; TONGA-NEXT:    s_sub_i32 s3, 0, s2
+; TONGA-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT:    v_mul_lo_u32 v1, s3, v0
 ; TONGA-NEXT:    v_mul_hi_u32 v1, v0, v1
-; TONGA-NEXT:    v_mul_lo_u32 v1, v1, v2
-; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, v0, v1
-; TONGA-NEXT:    v_subrev_u32_e32 v1, vcc, v2, v0
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT:    v_mul_hi_u32 v0, s4, v0
+; TONGA-NEXT:    v_mul_lo_u32 v0, v0, s2
+; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, s4, v0
+; TONGA-NEXT:    v_subrev_u32_e32 v1, vcc, s2, v0
+; TONGA-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
 ; TONGA-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; TONGA-NEXT:    v_sub_u32_e32 v1, vcc, v0, v2
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
-; TONGA-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
+; TONGA-NEXT:    v_subrev_u32_e32 v1, vcc, s2, v0
+; TONGA-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
+; TONGA-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; TONGA-NEXT:    v_mov_b32_e32 v1, 0
+; TONGA-NEXT:    s_branch .LBB8_5
 ; TONGA-NEXT:  .LBB8_3:
-; TONGA-NEXT:    v_mov_b32_e32 v0, s4
-; TONGA-NEXT:    v_mov_b32_e32 v1, s5
-; TONGA-NEXT:    flat_store_dwordx2 v[0:1], v[3:4]
-; TONGA-NEXT:    s_endpgm
-; TONGA-NEXT:  .LBB8_4:
-; TONGA-NEXT:    ; implicit-def: $vgpr3_vgpr4
+; TONGA-NEXT:    ; implicit-def: $sgpr6_sgpr7
 ; TONGA-NEXT:    s_branch .LBB8_2
+; TONGA-NEXT:  .LBB8_4:
+; TONGA-NEXT:    v_mov_b32_e32 v0, s6
+; TONGA-NEXT:    v_mov_b32_e32 v1, s7
+; TONGA-NEXT:  .LBB8_5:
+; TONGA-NEXT:    v_mov_b32_e32 v2, s0
+; TONGA-NEXT:    v_mov_b32_e32 v3, s1
+; TONGA-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
+; TONGA-NEXT:    s_endpgm
 ;
 ; EG-LABEL: srem_i64:
 ; EG:       ; %bb.0:
@@ -2684,35 +2702,35 @@ define amdgpu_kernel void @srem_i64_4(ptr addrspace(1) %out, ptr addrspace(1) %i
 define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %in) {
 ; GCN-LABEL: srem_v2i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
 ; GCN-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    global_load_dwordx4 v[0:3], v8, s[10:11] offset:16
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v8, s[10:11]
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v8, s[2:3] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v8, s[2:3]
 ; GCN-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NEXT:    v_readfirstlane_b32 s11, v1
-; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    v_readfirstlane_b32 s8, v0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_readfirstlane_b32 s13, v5
-; GCN-NEXT:    v_readfirstlane_b32 s12, v4
-; GCN-NEXT:    s_or_b64 s[0:1], s[12:13], s[10:11]
-; GCN-NEXT:    s_mov_b32 s0, 0
-; GCN-NEXT:    v_readfirstlane_b32 s5, v3
-; GCN-NEXT:    v_readfirstlane_b32 s4, v2
-; GCN-NEXT:    v_readfirstlane_b32 s7, v7
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    v_readfirstlane_b32 s6, v6
-; GCN-NEXT:    s_cbranch_scc0 .LBB10_7
+; GCN-NEXT:    v_readfirstlane_b32 s11, v5
+; GCN-NEXT:    v_readfirstlane_b32 s10, v4
+; GCN-NEXT:    s_or_b64 s[6:7], s[10:11], s[8:9]
+; GCN-NEXT:    s_mov_b32 s6, 0
+; GCN-NEXT:    v_readfirstlane_b32 s3, v3
+; GCN-NEXT:    v_readfirstlane_b32 s2, v2
+; GCN-NEXT:    v_readfirstlane_b32 s5, v7
+; GCN-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GCN-NEXT:    v_readfirstlane_b32 s4, v6
+; GCN-NEXT:    s_cbranch_scc0 .LBB10_6
 ; GCN-NEXT:  ; %bb.1:
-; GCN-NEXT:    s_ashr_i32 s0, s11, 31
-; GCN-NEXT:    s_add_u32 s2, s10, s0
-; GCN-NEXT:    s_mov_b32 s1, s0
-; GCN-NEXT:    s_addc_u32 s3, s11, s0
-; GCN-NEXT:    s_xor_b64 s[16:17], s[2:3], s[0:1]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s16
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s17
-; GCN-NEXT:    s_sub_u32 s0, 0, s16
-; GCN-NEXT:    s_subb_u32 s1, 0, s17
+; GCN-NEXT:    s_ashr_i32 s6, s9, 31
+; GCN-NEXT:    s_add_u32 s12, s8, s6
+; GCN-NEXT:    s_mov_b32 s7, s6
+; GCN-NEXT:    s_addc_u32 s13, s9, s6
+; GCN-NEXT:    s_xor_b64 s[6:7], s[12:13], s[6:7]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GCN-NEXT:    s_sub_u32 s9, 0, s6
+; GCN-NEXT:    s_subb_u32 s16, 0, s7
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2721,321 +2739,312 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s2, v1
-; GCN-NEXT:    v_readfirstlane_b32 s3, v0
-; GCN-NEXT:    s_mul_i32 s11, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s19, s0, s3
-; GCN-NEXT:    s_mul_i32 s18, s1, s3
-; GCN-NEXT:    s_add_i32 s11, s19, s11
-; GCN-NEXT:    s_add_i32 s11, s11, s18
-; GCN-NEXT:    s_mul_i32 s20, s0, s3
-; GCN-NEXT:    s_mul_hi_u32 s18, s3, s11
-; GCN-NEXT:    s_mul_i32 s19, s3, s11
-; GCN-NEXT:    s_mul_hi_u32 s3, s3, s20
-; GCN-NEXT:    s_add_u32 s3, s3, s19
+; GCN-NEXT:    v_readfirstlane_b32 s17, v1
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_mul_i32 s15, s9, s17
+; GCN-NEXT:    s_mul_hi_u32 s19, s9, s14
+; GCN-NEXT:    s_mul_i32 s18, s16, s14
+; GCN-NEXT:    s_add_i32 s15, s19, s15
+; GCN-NEXT:    s_add_i32 s15, s15, s18
+; GCN-NEXT:    s_mul_i32 s20, s9, s14
+; GCN-NEXT:    s_mul_i32 s19, s14, s15
+; GCN-NEXT:    s_mul_hi_u32 s21, s14, s20
+; GCN-NEXT:    s_mul_hi_u32 s18, s14, s15
+; GCN-NEXT:    s_add_u32 s19, s21, s19
 ; GCN-NEXT:    s_addc_u32 s18, 0, s18
-; GCN-NEXT:    s_mul_hi_u32 s21, s2, s20
-; GCN-NEXT:    s_mul_i32 s20, s2, s20
-; GCN-NEXT:    s_add_u32 s3, s3, s20
-; GCN-NEXT:    s_mul_hi_u32 s19, s2, s11
-; GCN-NEXT:    s_addc_u32 s3, s18, s21
-; GCN-NEXT:    s_addc_u32 s18, s19, 0
-; GCN-NEXT:    s_mul_i32 s11, s2, s11
-; GCN-NEXT:    s_add_u32 s3, s3, s11
+; GCN-NEXT:    s_mul_hi_u32 s22, s17, s20
+; GCN-NEXT:    s_mul_i32 s20, s17, s20
+; GCN-NEXT:    s_add_u32 s19, s19, s20
+; GCN-NEXT:    s_mul_hi_u32 s21, s17, s15
+; GCN-NEXT:    s_addc_u32 s18, s18, s22
+; GCN-NEXT:    s_addc_u32 s19, s21, 0
+; GCN-NEXT:    s_mul_i32 s15, s17, s15
+; GCN-NEXT:    s_add_u32 s15, s18, s15
+; GCN-NEXT:    s_addc_u32 s18, 0, s19
+; GCN-NEXT:    s_add_i32 s19, s14, s15
+; GCN-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GCN-NEXT:    s_addc_u32 s17, s17, s18
+; GCN-NEXT:    s_mul_i32 s14, s9, s17
+; GCN-NEXT:    s_mul_hi_u32 s15, s9, s19
+; GCN-NEXT:    s_add_i32 s14, s15, s14
+; GCN-NEXT:    s_mul_i32 s16, s16, s19
+; GCN-NEXT:    s_add_i32 s14, s14, s16
+; GCN-NEXT:    s_mul_i32 s9, s9, s19
+; GCN-NEXT:    s_mul_hi_u32 s16, s17, s9
+; GCN-NEXT:    s_mul_i32 s18, s17, s9
+; GCN-NEXT:    s_mul_i32 s21, s19, s14
+; GCN-NEXT:    s_mul_hi_u32 s9, s19, s9
+; GCN-NEXT:    s_mul_hi_u32 s20, s19, s14
+; GCN-NEXT:    s_add_u32 s9, s9, s21
+; GCN-NEXT:    s_addc_u32 s20, 0, s20
+; GCN-NEXT:    s_add_u32 s9, s9, s18
+; GCN-NEXT:    s_mul_hi_u32 s15, s17, s14
+; GCN-NEXT:    s_addc_u32 s9, s20, s16
+; GCN-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-NEXT:    s_mul_i32 s14, s17, s14
+; GCN-NEXT:    s_add_u32 s9, s9, s14
+; GCN-NEXT:    s_addc_u32 s16, 0, s15
+; GCN-NEXT:    s_add_i32 s19, s19, s9
+; GCN-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; GCN-NEXT:    s_addc_u32 s9, s17, s16
+; GCN-NEXT:    s_ashr_i32 s14, s11, 31
+; GCN-NEXT:    s_add_u32 s16, s10, s14
+; GCN-NEXT:    s_mov_b32 s15, s14
+; GCN-NEXT:    s_addc_u32 s17, s11, s14
+; GCN-NEXT:    s_xor_b64 s[16:17], s[16:17], s[14:15]
+; GCN-NEXT:    s_mul_i32 s18, s16, s9
+; GCN-NEXT:    s_mul_hi_u32 s20, s16, s19
+; GCN-NEXT:    s_mul_hi_u32 s11, s16, s9
+; GCN-NEXT:    s_add_u32 s18, s20, s18
+; GCN-NEXT:    s_addc_u32 s11, 0, s11
+; GCN-NEXT:    s_mul_hi_u32 s21, s17, s19
+; GCN-NEXT:    s_mul_i32 s19, s17, s19
+; GCN-NEXT:    s_add_u32 s18, s18, s19
+; GCN-NEXT:    s_mul_hi_u32 s20, s17, s9
+; GCN-NEXT:    s_addc_u32 s11, s11, s21
+; GCN-NEXT:    s_addc_u32 s18, s20, 0
+; GCN-NEXT:    s_mul_i32 s9, s17, s9
+; GCN-NEXT:    s_add_u32 s9, s11, s9
 ; GCN-NEXT:    s_addc_u32 s11, 0, s18
-; GCN-NEXT:    v_add_co_u32_e32 v0, vcc, s3, v0
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s11
-; GCN-NEXT:    v_readfirstlane_b32 s11, v0
-; GCN-NEXT:    s_mul_i32 s3, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s18, s0, s11
-; GCN-NEXT:    s_add_i32 s3, s18, s3
-; GCN-NEXT:    s_mul_i32 s1, s1, s11
-; GCN-NEXT:    s_add_i32 s3, s3, s1
-; GCN-NEXT:    s_mul_i32 s0, s0, s11
-; GCN-NEXT:    s_mul_hi_u32 s18, s2, s0
-; GCN-NEXT:    s_mul_i32 s19, s2, s0
-; GCN-NEXT:    s_mul_i32 s21, s11, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s11, s0
-; GCN-NEXT:    s_mul_hi_u32 s20, s11, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s21
-; GCN-NEXT:    s_addc_u32 s11, 0, s20
-; GCN-NEXT:    s_add_u32 s0, s0, s19
-; GCN-NEXT:    s_mul_hi_u32 s1, s2, s3
-; GCN-NEXT:    s_addc_u32 s0, s11, s18
-; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_mul_i32 s3, s2, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s3
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s1
-; GCN-NEXT:    s_ashr_i32 s18, s13, 31
-; GCN-NEXT:    s_add_u32 s0, s12, s18
-; GCN-NEXT:    s_mov_b32 s19, s18
-; GCN-NEXT:    s_addc_u32 s1, s13, s18
-; GCN-NEXT:    s_xor_b64 s[20:21], s[0:1], s[18:19]
-; GCN-NEXT:    v_readfirstlane_b32 s3, v0
-; GCN-NEXT:    s_mul_i32 s1, s20, s2
-; GCN-NEXT:    s_mul_hi_u32 s11, s20, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s20, s2
-; GCN-NEXT:    s_add_u32 s1, s11, s1
-; GCN-NEXT:    s_addc_u32 s0, 0, s0
-; GCN-NEXT:    s_mul_hi_u32 s13, s21, s3
-; GCN-NEXT:    s_mul_i32 s3, s21, s3
-; GCN-NEXT:    s_add_u32 s1, s1, s3
-; GCN-NEXT:    s_mul_hi_u32 s11, s21, s2
-; GCN-NEXT:    s_addc_u32 s0, s0, s13
-; GCN-NEXT:    s_addc_u32 s1, s11, 0
-; GCN-NEXT:    s_mul_i32 s2, s21, s2
-; GCN-NEXT:    s_add_u32 s0, s0, s2
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    s_mul_i32 s1, s16, s1
-; GCN-NEXT:    s_mul_hi_u32 s2, s16, s0
-; GCN-NEXT:    s_add_i32 s1, s2, s1
-; GCN-NEXT:    s_mul_i32 s2, s17, s0
-; GCN-NEXT:    s_mul_i32 s0, s16, s0
-; GCN-NEXT:    s_add_i32 s11, s1, s2
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    s_sub_i32 s1, s21, s11
-; GCN-NEXT:    v_sub_co_u32_e32 v0, vcc, s20, v0
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_subb_u32 s13, s1, s17
-; GCN-NEXT:    v_subrev_co_u32_e64 v1, s[0:1], s16, v0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s19, s13, 0
-; GCN-NEXT:    s_cmp_ge_u32 s19, s17
-; GCN-NEXT:    s_cselect_b32 s20, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s16, v1
-; GCN-NEXT:    s_cmp_eq_u32 s19, s17
-; GCN-NEXT:    v_cndmask_b32_e64 v2, 0, -1, s[2:3]
-; GCN-NEXT:    v_mov_b32_e32 v3, s20
-; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v3, v2, s[2:3]
-; GCN-NEXT:    s_subb_u32 s2, s13, s17
-; GCN-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s16, v1
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s2, s2, 0
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v1, v3, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v2, s19
-; GCN-NEXT:    v_mov_b32_e32 v3, s2
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
-; GCN-NEXT:    s_subb_u32 s0, s21, s11
-; GCN-NEXT:    s_cmp_ge_u32 s0, s17
-; GCN-NEXT:    s_cselect_b32 s1, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s16, v0
-; GCN-NEXT:    s_cmp_eq_u32 s0, s17
-; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_mov_b32_e32 v4, s1
-; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
-; GCN-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; GCN-NEXT:    v_mov_b32_e32 v4, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s18, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s18, v2
-; GCN-NEXT:    v_mov_b32_e32 v2, s18
-; GCN-NEXT:    v_subrev_co_u32_e32 v0, vcc, s18, v0
-; GCN-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-NEXT:    s_mul_i32 s11, s6, s11
+; GCN-NEXT:    s_mul_hi_u32 s18, s6, s9
+; GCN-NEXT:    s_add_i32 s11, s18, s11
+; GCN-NEXT:    s_mul_i32 s18, s7, s9
+; GCN-NEXT:    s_add_i32 s11, s11, s18
+; GCN-NEXT:    s_sub_i32 s20, s17, s11
+; GCN-NEXT:    s_mul_i32 s9, s6, s9
+; GCN-NEXT:    s_sub_i32 s9, s16, s9
+; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s16, s20, s7
+; GCN-NEXT:    s_sub_i32 s22, s9, s6
+; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_subb_u32 s23, s16, 0
+; GCN-NEXT:    s_cmp_ge_u32 s23, s7
+; GCN-NEXT:    s_cselect_b32 s24, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s22, s6
+; GCN-NEXT:    s_cselect_b32 s25, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s23, s7
+; GCN-NEXT:    s_cselect_b32 s24, s25, s24
+; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_subb_u32 s16, s16, s7
+; GCN-NEXT:    s_sub_i32 s25, s22, s6
+; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_subb_u32 s16, s16, 0
+; GCN-NEXT:    s_cmp_lg_u32 s24, 0
+; GCN-NEXT:    s_cselect_b32 s20, s25, s22
+; GCN-NEXT:    s_cselect_b32 s16, s16, s23
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s11, s17, s11
+; GCN-NEXT:    s_cmp_ge_u32 s11, s7
+; GCN-NEXT:    s_cselect_b32 s17, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s9, s6
+; GCN-NEXT:    s_cselect_b32 s6, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s11, s7
+; GCN-NEXT:    s_cselect_b32 s6, s6, s17
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_cselect_b32 s7, s16, s11
+; GCN-NEXT:    s_cselect_b32 s6, s20, s9
+; GCN-NEXT:    s_xor_b64 s[6:7], s[6:7], s[14:15]
+; GCN-NEXT:    s_sub_u32 s6, s6, s14
+; GCN-NEXT:    s_subb_u32 s7, s7, s14
 ; GCN-NEXT:    s_cbranch_execnz .LBB10_3
 ; GCN-NEXT:  .LBB10_2:
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
-; GCN-NEXT:    s_sub_i32 s0, 0, s10
-; GCN-NEXT:    s_mov_b32 s1, 0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_sub_i32 s6, 0, s8
+; GCN-NEXT:    s_mov_b32 s7, 0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s2, v0
-; GCN-NEXT:    s_mul_i32 s0, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s0, s2, s0
-; GCN-NEXT:    s_add_i32 s2, s2, s0
-; GCN-NEXT:    s_mul_hi_u32 s0, s12, s2
-; GCN-NEXT:    s_mul_i32 s0, s0, s10
-; GCN-NEXT:    s_sub_i32 s0, s12, s0
-; GCN-NEXT:    s_sub_i32 s2, s0, s10
-; GCN-NEXT:    s_cmp_ge_u32 s0, s10
-; GCN-NEXT:    s_cselect_b32 s0, s2, s0
-; GCN-NEXT:    s_sub_i32 s2, s0, s10
-; GCN-NEXT:    s_cmp_ge_u32 s0, s10
-; GCN-NEXT:    s_cselect_b32 s0, s2, s0
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    v_readfirstlane_b32 s9, v0
+; GCN-NEXT:    s_mul_i32 s6, s6, s9
+; GCN-NEXT:    s_mul_hi_u32 s6, s9, s6
+; GCN-NEXT:    s_add_i32 s9, s9, s6
+; GCN-NEXT:    s_mul_hi_u32 s6, s10, s9
+; GCN-NEXT:    s_mul_i32 s6, s6, s8
+; GCN-NEXT:    s_sub_i32 s6, s10, s6
+; GCN-NEXT:    s_sub_i32 s9, s6, s8
+; GCN-NEXT:    s_cmp_ge_u32 s6, s8
+; GCN-NEXT:    s_cselect_b32 s6, s9, s6
+; GCN-NEXT:    s_sub_i32 s9, s6, s8
+; GCN-NEXT:    s_cmp_ge_u32 s6, s8
+; GCN-NEXT:    s_cselect_b32 s6, s9, s6
 ; GCN-NEXT:  .LBB10_3:
-; GCN-NEXT:    s_or_b64 s[0:1], s[6:7], s[4:5]
-; GCN-NEXT:    s_mov_b32 s0, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_cbranch_scc0 .LBB10_8
+; GCN-NEXT:    s_or_b64 s[8:9], s[4:5], s[2:3]
+; GCN-NEXT:    s_mov_b32 s8, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cbranch_scc0 .LBB10_7
 ; GCN-NEXT:  ; %bb.4:
-; GCN-NEXT:    s_ashr_i32 s0, s5, 31
-; GCN-NEXT:    s_add_u32 s2, s4, s0
-; GCN-NEXT:    s_mov_b32 s1, s0
-; GCN-NEXT:    s_addc_u32 s3, s5, s0
-; GCN-NEXT:    s_xor_b64 s[12:13], s[2:3], s[0:1]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s12
-; GCN-NEXT:    v_cvt_f32_u32_e32 v3, s13
-; GCN-NEXT:    s_sub_u32 s0, 0, s12
-; GCN-NEXT:    s_subb_u32 s1, 0, s13
-; GCN-NEXT:    v_madmk_f32 v2, v3, 0x4f800000, v2
-; GCN-NEXT:    v_rcp_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x5f7ffffc, v2
-; GCN-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v2
-; GCN-NEXT:    v_trunc_f32_e32 v3, v3
-; GCN-NEXT:    v_madmk_f32 v2, v3, 0xcf800000, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_readfirstlane_b32 s2, v3
-; GCN-NEXT:    v_readfirstlane_b32 s3, v2
-; GCN-NEXT:    s_mul_i32 s5, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s15, s0, s3
-; GCN-NEXT:    s_mul_i32 s14, s1, s3
-; GCN-NEXT:    s_add_i32 s5, s15, s5
-; GCN-NEXT:    s_add_i32 s5, s5, s14
-; GCN-NEXT:    s_mul_i32 s16, s0, s3
-; GCN-NEXT:    s_mul_hi_u32 s14, s3, s5
-; GCN-NEXT:    s_mul_i32 s15, s3, s5
-; GCN-NEXT:    s_mul_hi_u32 s3, s3, s16
-; GCN-NEXT:    s_add_u32 s3, s3, s15
-; GCN-NEXT:    s_addc_u32 s14, 0, s14
-; GCN-NEXT:    s_mul_hi_u32 s17, s2, s16
-; GCN-NEXT:    s_mul_i32 s16, s2, s16
+; GCN-NEXT:    s_ashr_i32 s8, s3, 31
+; GCN-NEXT:    s_add_u32 s10, s2, s8
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_addc_u32 s11, s3, s8
+; GCN-NEXT:    s_xor_b64 s[10:11], s[10:11], s[8:9]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s11
+; GCN-NEXT:    s_sub_u32 s3, 0, s10
+; GCN-NEXT:    s_subb_u32 s14, 0, s11
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; GCN-NEXT:    v_rcp_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s15, v1
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_mul_i32 s13, s3, s15
+; GCN-NEXT:    s_mul_hi_u32 s17, s3, s12
+; GCN-NEXT:    s_mul_i32 s16, s14, s12
+; GCN-NEXT:    s_add_i32 s13, s17, s13
+; GCN-NEXT:    s_add_i32 s13, s13, s16
+; GCN-NEXT:    s_mul_i32 s18, s3, s12
+; GCN-NEXT:    s_mul_i32 s17, s12, s13
+; GCN-NEXT:    s_mul_hi_u32 s19, s12, s18
+; GCN-NEXT:    s_mul_hi_u32 s16, s12, s13
+; GCN-NEXT:    s_add_u32 s17, s19, s17
+; GCN-NEXT:    s_addc_u32 s16, 0, s16
+; GCN-NEXT:    s_mul_hi_u32 s20, s15, s18
+; GCN-NEXT:    s_mul_i32 s18, s15, s18
+; GCN-NEXT:    s_add_u32 s17, s17, s18
+; GCN-NEXT:    s_mul_hi_u32 s19, s15, s13
+; GCN-NEXT:    s_addc_u32 s16, s16, s20
+; GCN-NEXT:    s_addc_u32 s17, s19, 0
+; GCN-NEXT:    s_mul_i32 s13, s15, s13
+; GCN-NEXT:    s_add_u32 s13, s16, s13
+; GCN-NEXT:    s_addc_u32 s16, 0, s17
+; GCN-NEXT:    s_add_i32 s17, s12, s13
+; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GCN-NEXT:    s_addc_u32 s15, s15, s16
+; GCN-NEXT:    s_mul_i32 s12, s3, s15
+; GCN-NEXT:    s_mul_hi_u32 s13, s3, s17
+; GCN-NEXT:    s_add_i32 s12, s13, s12
+; GCN-NEXT:    s_mul_i32 s14, s14, s17
+; GCN-NEXT:    s_add_i32 s12, s12, s14
+; GCN-NEXT:    s_mul_i32 s3, s3, s17
+; GCN-NEXT:    s_mul_hi_u32 s14, s15, s3
+; GCN-NEXT:    s_mul_i32 s16, s15, s3
+; GCN-NEXT:    s_mul_i32 s19, s17, s12
+; GCN-NEXT:    s_mul_hi_u32 s3, s17, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s17, s12
+; GCN-NEXT:    s_add_u32 s3, s3, s19
+; GCN-NEXT:    s_addc_u32 s18, 0, s18
 ; GCN-NEXT:    s_add_u32 s3, s3, s16
-; GCN-NEXT:    s_mul_hi_u32 s15, s2, s5
-; GCN-NEXT:    s_addc_u32 s3, s14, s17
-; GCN-NEXT:    s_addc_u32 s14, s15, 0
-; GCN-NEXT:    s_mul_i32 s5, s2, s5
-; GCN-NEXT:    s_add_u32 s3, s3, s5
-; GCN-NEXT:    s_addc_u32 s5, 0, s14
-; GCN-NEXT:    v_add_co_u32_e32 v2, vcc, s3, v2
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s5
-; GCN-NEXT:    v_readfirstlane_b32 s5, v2
-; GCN-NEXT:    s_mul_i32 s3, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s14, s0, s5
-; GCN-NEXT:    s_add_i32 s3, s14, s3
-; GCN-NEXT:    s_mul_i32 s1, s1, s5
-; GCN-NEXT:    s_add_i32 s3, s3, s1
-; GCN-NEXT:    s_mul_i32 s0, s0, s5
-; GCN-NEXT:    s_mul_hi_u32 s14, s2, s0
-; GCN-NEXT:    s_mul_i32 s15, s2, s0
-; GCN-NEXT:    s_mul_i32 s17, s5, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s5, s0
-; GCN-NEXT:    s_mul_hi_u32 s16, s5, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s17
+; GCN-NEXT:    s_mul_hi_u32 s13, s15, s12
+; GCN-NEXT:    s_addc_u32 s3, s18, s14
+; GCN-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-NEXT:    s_mul_i32 s12, s15, s12
+; GCN-NEXT:    s_add_u32 s3, s3, s12
+; GCN-NEXT:    s_addc_u32 s14, 0, s13
+; GCN-NEXT:    s_add_i32 s17, s17, s3
+; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GCN-NEXT:    s_addc_u32 s3, s15, s14
+; GCN-NEXT:    s_ashr_i32 s12, s5, 31
+; GCN-NEXT:    s_add_u32 s14, s4, s12
+; GCN-NEXT:    s_mov_b32 s13, s12
+; GCN-NEXT:    s_addc_u32 s15, s5, s12
+; GCN-NEXT:    s_xor_b64 s[14:15], s[14:15], s[12:13]
+; GCN-NEXT:    s_mul_i32 s16, s14, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s14, s17
+; GCN-NEXT:    s_mul_hi_u32 s5, s14, s3
+; GCN-NEXT:    s_add_u32 s16, s18, s16
+; GCN-NEXT:    s_addc_u32 s5, 0, s5
+; GCN-NEXT:    s_mul_hi_u32 s19, s15, s17
+; GCN-NEXT:    s_mul_i32 s17, s15, s17
+; GCN-NEXT:    s_add_u32 s16, s16, s17
+; GCN-NEXT:    s_mul_hi_u32 s18, s15, s3
+; GCN-NEXT:    s_addc_u32 s5, s5, s19
+; GCN-NEXT:    s_addc_u32 s16, s18, 0
+; GCN-NEXT:    s_mul_i32 s3, s15, s3
+; GCN-NEXT:    s_add_u32 s3, s5, s3
 ; GCN-NEXT:    s_addc_u32 s5, 0, s16
-; GCN-NEXT:    s_add_u32 s0, s0, s15
-; GCN-NEXT:    s_mul_hi_u32 s1, s2, s3
-; GCN-NEXT:    s_addc_u32 s0, s5, s14
-; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_mul_i32 s3, s2, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s3
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    v_add_co_u32_e32 v2, vcc, s0, v2
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s1
-; GCN-NEXT:    s_ashr_i32 s14, s7, 31
-; GCN-NEXT:    s_add_u32 s0, s6, s14
-; GCN-NEXT:    s_mov_b32 s15, s14
-; GCN-NEXT:    s_addc_u32 s1, s7, s14
-; GCN-NEXT:    s_xor_b64 s[16:17], s[0:1], s[14:15]
-; GCN-NEXT:    v_readfirstlane_b32 s3, v2
-; GCN-NEXT:    s_mul_i32 s1, s16, s2
-; GCN-NEXT:    s_mul_hi_u32 s5, s16, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s16, s2
-; GCN-NEXT:    s_add_u32 s1, s5, s1
-; GCN-NEXT:    s_addc_u32 s0, 0, s0
-; GCN-NEXT:    s_mul_hi_u32 s7, s17, s3
-; GCN-NEXT:    s_mul_i32 s3, s17, s3
-; GCN-NEXT:    s_add_u32 s1, s1, s3
-; GCN-NEXT:    s_mul_hi_u32 s5, s17, s2
-; GCN-NEXT:    s_addc_u32 s0, s0, s7
-; GCN-NEXT:    s_addc_u32 s1, s5, 0
-; GCN-NEXT:    s_mul_i32 s2, s17, s2
-; GCN-NEXT:    s_add_u32 s0, s0, s2
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    s_mul_i32 s1, s12, s1
-; GCN-NEXT:    s_mul_hi_u32 s2, s12, s0
-; GCN-NEXT:    s_add_i32 s1, s2, s1
-; GCN-NEXT:    s_mul_i32 s2, s13, s0
-; GCN-NEXT:    s_mul_i32 s0, s12, s0
-; GCN-NEXT:    s_add_i32 s5, s1, s2
-; GCN-NEXT:    v_mov_b32_e32 v2, s0
-; GCN-NEXT:    s_sub_i32 s1, s17, s5
-; GCN-NEXT:    v_sub_co_u32_e32 v2, vcc, s16, v2
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_subb_u32 s7, s1, s13
-; GCN-NEXT:    v_subrev_co_u32_e64 v3, s[0:1], s12, v2
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s15, s7, 0
-; GCN-NEXT:    s_cmp_ge_u32 s15, s13
-; GCN-NEXT:    s_cselect_b32 s16, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v3
-; GCN-NEXT:    s_cmp_eq_u32 s15, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[2:3]
-; GCN-NEXT:    v_mov_b32_e32 v5, s16
-; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v5, v4, s[2:3]
-; GCN-NEXT:    s_subb_u32 s2, s7, s13
-; GCN-NEXT:    v_subrev_co_u32_e64 v5, s[0:1], s12, v3
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s2, s2, 0
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v3, v5, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v4, s15
-; GCN-NEXT:    v_mov_b32_e32 v5, s2
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v4, v5, s[0:1]
-; GCN-NEXT:    s_subb_u32 s0, s17, s5
-; GCN-NEXT:    s_cmp_ge_u32 s0, s13
-; GCN-NEXT:    s_cselect_b32 s1, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v2
-; GCN-NEXT:    s_cmp_eq_u32 s0, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_mov_b32_e32 v6, s1
-; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
-; GCN-NEXT:    v_cndmask_b32_e32 v5, v6, v5, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v5
-; GCN-NEXT:    v_mov_b32_e32 v6, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v6, v4, vcc
-; GCN-NEXT:    v_xor_b32_e32 v2, s14, v2
-; GCN-NEXT:    v_xor_b32_e32 v3, s14, v4
-; GCN-NEXT:    v_mov_b32_e32 v4, s14
-; GCN-NEXT:    v_subrev_co_u32_e32 v2, vcc, s14, v2
-; GCN-NEXT:    v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
-; GCN-NEXT:    s_cbranch_execnz .LBB10_6
+; GCN-NEXT:    s_mul_i32 s5, s10, s5
+; GCN-NEXT:    s_mul_hi_u32 s16, s10, s3
+; GCN-NEXT:    s_add_i32 s5, s16, s5
+; GCN-NEXT:    s_mul_i32 s16, s11, s3
+; GCN-NEXT:    s_add_i32 s5, s5, s16
+; GCN-NEXT:    s_sub_i32 s18, s15, s5
+; GCN-NEXT:    s_mul_i32 s3, s10, s3
+; GCN-NEXT:    s_sub_i32 s3, s14, s3
+; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_subb_u32 s14, s18, s11
+; GCN-NEXT:    s_sub_i32 s20, s3, s10
+; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s21, s14, 0
+; GCN-NEXT:    s_cmp_ge_u32 s21, s11
+; GCN-NEXT:    s_cselect_b32 s22, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s20, s10
+; GCN-NEXT:    s_cselect_b32 s23, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s21, s11
+; GCN-NEXT:    s_cselect_b32 s22, s23, s22
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s14, s14, s11
+; GCN-NEXT:    s_sub_i32 s23, s20, s10
+; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s14, s14, 0
+; GCN-NEXT:    s_cmp_lg_u32 s22, 0
+; GCN-NEXT:    s_cselect_b32 s18, s23, s20
+; GCN-NEXT:    s_cselect_b32 s14, s14, s21
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_subb_u32 s5, s15, s5
+; GCN-NEXT:    s_cmp_ge_u32 s5, s11
+; GCN-NEXT:    s_cselect_b32 s15, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s3, s10
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s5, s11
+; GCN-NEXT:    s_cselect_b32 s10, s10, s15
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_cselect_b32 s11, s14, s5
+; GCN-NEXT:    s_cselect_b32 s10, s18, s3
+; GCN-NEXT:    s_xor_b64 s[10:11], s[10:11], s[12:13]
+; GCN-NEXT:    s_sub_u32 s10, s10, s12
+; GCN-NEXT:    s_subb_u32 s11, s11, s12
+; GCN-NEXT:    s_cbranch_execnz .LBB10_8
 ; GCN-NEXT:  .LBB10_5:
-; GCN-NEXT:    v_cvt_f32_u32_e32 v2, s4
-; GCN-NEXT:    s_sub_i32 s0, 0, s4
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_f32_e32 v2, 0x4f7ffffe, v2
-; GCN-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GCN-NEXT:    v_mul_lo_u32 v3, s0, v2
-; GCN-NEXT:    v_mul_hi_u32 v3, v2, v3
-; GCN-NEXT:    v_add_u32_e32 v2, v2, v3
-; GCN-NEXT:    v_mul_hi_u32 v2, s6, v2
-; GCN-NEXT:    v_mul_lo_u32 v2, v2, s4
-; GCN-NEXT:    v_sub_u32_e32 v2, s6, v2
-; GCN-NEXT:    v_subrev_u32_e32 v3, s4, v2
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s4, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
-; GCN-NEXT:    v_subrev_u32_e32 v3, s4, v2
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s4, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v3, vcc
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    s_sub_i32 s3, 0, s2
 ; GCN-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    v_add_u32_e32 v0, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s2
+; GCN-NEXT:    v_sub_u32_e32 v0, s4, v0
+; GCN-NEXT:    v_subrev_u32_e32 v1, s2, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    v_subrev_u32_e32 v1, s2, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GCN-NEXT:    s_branch .LBB10_9
 ; GCN-NEXT:  .LBB10_6:
-; GCN-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-NEXT:    global_store_dwordx4 v4, v[0:3], s[8:9]
-; GCN-NEXT:    s_endpgm
-; GCN-NEXT:  .LBB10_7:
-; GCN-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GCN-NEXT:    ; implicit-def: $sgpr6_sgpr7
 ; GCN-NEXT:    s_branch .LBB10_2
-; GCN-NEXT:  .LBB10_8:
+; GCN-NEXT:  .LBB10_7:
+; GCN-NEXT:    ; implicit-def: $sgpr10_sgpr11
 ; GCN-NEXT:    s_branch .LBB10_5
+; GCN-NEXT:  .LBB10_8:
+; GCN-NEXT:    v_mov_b32_e32 v2, s10
+; GCN-NEXT:    v_mov_b32_e32 v3, s11
+; GCN-NEXT:  .LBB10_9:
+; GCN-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
+; GCN-NEXT:    s_endpgm
 ;
 ; TAHITI-LABEL: srem_v2i64:
 ; TAHITI:       ; %bb.0:
@@ -3097,7 +3106,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    v_mul_lo_u32 v12, v12, v9
 ; TAHITI-NEXT:    v_mul_lo_u32 v11, v11, v9
 ; TAHITI-NEXT:    v_add_i32_e32 v13, vcc, v13, v14
-; TAHITI-NEXT:    v_add_i32_e32 v12, vcc, v13, v12
+; TAHITI-NEXT:    v_add_i32_e32 v12, vcc, v12, v13
 ; TAHITI-NEXT:    v_mul_lo_u32 v15, v9, v12
 ; TAHITI-NEXT:    v_mul_hi_u32 v16, v9, v11
 ; TAHITI-NEXT:    v_mul_hi_u32 v17, v9, v12
@@ -3240,7 +3249,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    v_mul_lo_u32 v10, v10, v3
 ; TAHITI-NEXT:    v_mul_lo_u32 v5, v5, v3
 ; TAHITI-NEXT:    v_add_i32_e32 v11, vcc, v11, v12
-; TAHITI-NEXT:    v_add_i32_e32 v10, vcc, v11, v10
+; TAHITI-NEXT:    v_add_i32_e32 v10, vcc, v10, v11
 ; TAHITI-NEXT:    v_mul_lo_u32 v13, v3, v10
 ; TAHITI-NEXT:    v_mul_hi_u32 v14, v3, v5
 ; TAHITI-NEXT:    v_mul_hi_u32 v15, v3, v10
@@ -3347,152 +3356,181 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-LABEL: srem_v2i64:
 ; TONGA:       ; %bb.0:
 ; TONGA-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
-; TONGA-NEXT:    v_mov_b32_e32 v8, 0
 ; TONGA-NEXT:    s_waitcnt lgkmcnt(0)
 ; TONGA-NEXT:    s_add_u32 s0, s6, 16
-; TONGA-NEXT:    v_mov_b32_e32 v4, s6
 ; TONGA-NEXT:    s_addc_u32 s1, s7, 0
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s0
-; TONGA-NEXT:    v_mov_b32_e32 v5, s7
+; TONGA-NEXT:    v_mov_b32_e32 v4, s6
 ; TONGA-NEXT:    v_mov_b32_e32 v1, s1
+; TONGA-NEXT:    v_mov_b32_e32 v5, s7
 ; TONGA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
 ; TONGA-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT:    s_waitcnt vmcnt(1)
+; TONGA-NEXT:    v_readfirstlane_b32 s1, v1
+; TONGA-NEXT:    v_readfirstlane_b32 s0, v0
 ; TONGA-NEXT:    s_waitcnt vmcnt(0)
-; TONGA-NEXT:    v_or_b32_e32 v9, v5, v1
-; TONGA-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; TONGA-NEXT:    s_cbranch_vccz .LBB10_7
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v5
+; TONGA-NEXT:    v_readfirstlane_b32 s2, v4
+; TONGA-NEXT:    s_or_b64 s[6:7], s[2:3], s[0:1]
+; TONGA-NEXT:    s_mov_b32 s6, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; TONGA-NEXT:    s_cbranch_scc0 .LBB10_3
 ; TONGA-NEXT:  ; %bb.1:
-; TONGA-NEXT:    v_ashrrev_i32_e32 v8, 31, v1
-; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v0, v8
-; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, v1, v8, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v14, v9, v8
-; TONGA-NEXT:    v_xor_b32_e32 v1, v1, v8
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v8, v14
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v9, v1
-; TONGA-NEXT:    v_sub_u32_e32 v15, vcc, 0, v14
-; TONGA-NEXT:    v_subb_u32_e32 v16, vcc, 0, v1, vcc
-; TONGA-NEXT:    v_madmk_f32 v8, v9, 0x4f800000, v8
-; TONGA-NEXT:    v_rcp_f32_e32 v8, v8
-; TONGA-NEXT:    v_mul_f32_e32 v8, 0x5f7ffffc, v8
-; TONGA-NEXT:    v_mul_f32_e32 v9, 0x2f800000, v8
-; TONGA-NEXT:    v_trunc_f32_e32 v9, v9
-; TONGA-NEXT:    v_madmk_f32 v8, v9, 0xcf800000, v8
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v12, v9
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v13, v8
-; TONGA-NEXT:    v_mul_lo_u32 v10, v15, v12
-; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[0:1], v15, v13, 0
-; TONGA-NEXT:    v_mul_lo_u32 v11, v16, v13
-; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v9, v10
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v9, v11
-; TONGA-NEXT:    v_mad_u64_u32 v[9:10], s[0:1], v13, v11, 0
-; TONGA-NEXT:    v_mul_hi_u32 v17, v13, v8
-; TONGA-NEXT:    v_add_u32_e32 v17, vcc, v17, v9
-; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[0:1], v12, v8, 0
-; TONGA-NEXT:    v_addc_u32_e32 v18, vcc, 0, v10, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v12, v11, 0
-; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v17, v8
-; TONGA-NEXT:    v_addc_u32_e32 v8, vcc, v18, v9, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v11, vcc
-; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v8, v10
-; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; TONGA-NEXT:    v_add_u32_e32 v17, vcc, v13, v8
-; TONGA-NEXT:    v_addc_u32_e32 v18, vcc, v12, v9, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[0:1], v15, v17, 0
-; TONGA-NEXT:    v_mul_lo_u32 v12, v15, v18
-; TONGA-NEXT:    v_mul_lo_u32 v13, v16, v17
-; TONGA-NEXT:    v_mul_hi_u32 v15, v17, v8
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v18, v8, 0
-; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v12, v9
-; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v9, v13
-; TONGA-NEXT:    v_mad_u64_u32 v[12:13], s[0:1], v17, v9, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[0:1], v18, v9, 0
-; TONGA-NEXT:    v_add_u32_e32 v12, vcc, v15, v12
-; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v12, v10
-; TONGA-NEXT:    v_addc_u32_e32 v10, vcc, v13, v11, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v10, v8
+; TONGA-NEXT:    s_ashr_i32 s6, s1, 31
+; TONGA-NEXT:    s_add_u32 s8, s0, s6
+; TONGA-NEXT:    s_mov_b32 s7, s6
+; TONGA-NEXT:    s_addc_u32 s9, s1, s6
+; TONGA-NEXT:    s_xor_b64 s[6:7], s[8:9], s[6:7]
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; TONGA-NEXT:    s_sub_u32 s1, 0, s6
+; TONGA-NEXT:    s_subb_u32 s10, 0, s7
+; TONGA-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; TONGA-NEXT:    v_rcp_f32_e32 v0, v0
+; TONGA-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; TONGA-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; TONGA-NEXT:    v_trunc_f32_e32 v1, v1
+; TONGA-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v8, v1
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v9, v0
+; TONGA-NEXT:    v_mul_lo_u32 v4, s1, v8
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s1, v9, 0
+; TONGA-NEXT:    v_mul_lo_u32 v5, s10, v9
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v1, v4
+; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v1, v5
+; TONGA-NEXT:    v_mul_hi_u32 v10, v9, v0
+; TONGA-NEXT:    v_mad_u64_u32 v[4:5], s[8:9], v9, v11, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v8, v0, 0
+; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v10, v4
+; TONGA-NEXT:    v_addc_u32_e32 v12, vcc, 0, v5, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[4:5], s[8:9], v8, v11, 0
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v10, v0
+; TONGA-NEXT:    v_addc_u32_e32 v0, vcc, v12, v1, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v4
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v9, v0
+; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, v8, v1, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s1, v10, 0
+; TONGA-NEXT:    v_mul_lo_u32 v8, s1, v11
+; TONGA-NEXT:    v_mul_lo_u32 v9, s10, v10
+; TONGA-NEXT:    v_mul_hi_u32 v12, v10, v0
+; TONGA-NEXT:    v_mad_u64_u32 v[4:5], s[8:9], v11, v0, 0
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v8, v1
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v9, v1
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], v10, v1, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], v11, v1, 0
+; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v12, v8
 ; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v17, v8
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, v18, v9, vcc
-; TONGA-NEXT:    v_ashrrev_i32_e32 v12, 31, v5
-; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v4, v12
-; TONGA-NEXT:    v_xor_b32_e32 v13, v8, v12
-; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[0:1], v13, v11, 0
-; TONGA-NEXT:    v_mul_hi_u32 v15, v13, v10
-; TONGA-NEXT:    v_addc_u32_e32 v5, vcc, v5, v12, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v5, v5, v12
-; TONGA-NEXT:    v_add_u32_e32 v15, vcc, v15, v8
-; TONGA-NEXT:    v_addc_u32_e32 v16, vcc, 0, v9, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[0:1], v5, v10, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v5, v11, 0
-; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v15, v8
-; TONGA-NEXT:    v_addc_u32_e32 v8, vcc, v16, v9, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v11, vcc
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v8, v10
-; TONGA-NEXT:    v_addc_u32_e32 v8, vcc, 0, v9, vcc
-; TONGA-NEXT:    v_mul_lo_u32 v11, v14, v8
-; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[0:1], v14, v10, 0
-; TONGA-NEXT:    v_mul_lo_u32 v10, v1, v10
-; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v11, v9
-; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v10, v9
-; TONGA-NEXT:    v_sub_u32_e32 v10, vcc, v5, v9
-; TONGA-NEXT:    v_sub_u32_e32 v8, vcc, v13, v8
-; TONGA-NEXT:    v_subb_u32_e64 v10, s[0:1], v10, v1, vcc
-; TONGA-NEXT:    v_sub_u32_e64 v11, s[0:1], v8, v14
-; TONGA-NEXT:    v_subbrev_u32_e64 v13, s[2:3], 0, v10, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v13, v1
-; TONGA-NEXT:    v_cndmask_b32_e64 v15, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v11, v14
-; TONGA-NEXT:    v_cndmask_b32_e64 v16, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_eq_u32_e64 s[2:3], v13, v1
-; TONGA-NEXT:    v_subb_u32_e64 v10, s[0:1], v10, v1, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e64 v15, v15, v16, s[2:3]
-; TONGA-NEXT:    v_sub_u32_e64 v16, s[0:1], v11, v14
-; TONGA-NEXT:    v_subb_u32_e32 v5, vcc, v5, v9, vcc
-; TONGA-NEXT:    v_subbrev_u32_e64 v10, s[0:1], 0, v10, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v1
-; TONGA-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v15
-; TONGA-NEXT:    v_cndmask_b32_e64 v9, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v14
-; TONGA-NEXT:    v_cndmask_b32_e64 v10, v13, v10, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e64 v13, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v1
-; TONGA-NEXT:    v_cndmask_b32_e32 v1, v9, v13, vcc
-; TONGA-NEXT:    v_cndmask_b32_e64 v11, v11, v16, s[0:1]
-; TONGA-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; TONGA-NEXT:    v_cndmask_b32_e32 v1, v5, v10, vcc
-; TONGA-NEXT:    v_cndmask_b32_e32 v5, v8, v11, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v5, v5, v12
-; TONGA-NEXT:    v_xor_b32_e32 v1, v1, v12
-; TONGA-NEXT:    v_sub_u32_e32 v8, vcc, v5, v12
-; TONGA-NEXT:    v_subb_u32_e32 v9, vcc, v1, v12, vcc
-; TONGA-NEXT:    s_cbranch_execnz .LBB10_3
+; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v8, v4
+; TONGA-NEXT:    v_addc_u32_e32 v4, vcc, v9, v5, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v4, v0
+; TONGA-NEXT:    s_ashr_i32 s10, s3, 31
+; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; TONGA-NEXT:    s_add_u32 s8, s2, s10
+; TONGA-NEXT:    v_add_u32_e32 v4, vcc, v10, v0
+; TONGA-NEXT:    s_mov_b32 s11, s10
+; TONGA-NEXT:    s_addc_u32 s9, s3, s10
+; TONGA-NEXT:    v_addc_u32_e32 v5, vcc, v11, v1, vcc
+; TONGA-NEXT:    s_xor_b64 s[12:13], s[8:9], s[10:11]
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s12, v5, 0
+; TONGA-NEXT:    v_mul_hi_u32 v8, s12, v4
+; TONGA-NEXT:    v_readfirstlane_b32 s1, v1
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v0
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s13, v5, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[4:5], s[8:9], s13, v4, 0
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v8
+; TONGA-NEXT:    s_add_u32 s3, s14, s3
+; TONGA-NEXT:    s_addc_u32 s1, 0, s1
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v4
+; TONGA-NEXT:    v_readfirstlane_b32 s9, v5
+; TONGA-NEXT:    s_add_u32 s3, s3, s14
+; TONGA-NEXT:    v_readfirstlane_b32 s8, v1
+; TONGA-NEXT:    s_addc_u32 s1, s1, s9
+; TONGA-NEXT:    s_addc_u32 s3, s8, 0
+; TONGA-NEXT:    v_readfirstlane_b32 s8, v0
+; TONGA-NEXT:    s_add_u32 s1, s1, s8
+; TONGA-NEXT:    v_mov_b32_e32 v0, s1
+; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[8:9], s6, v0, 0
+; TONGA-NEXT:    s_addc_u32 s3, 0, s3
+; TONGA-NEXT:    s_mul_i32 s3, s6, s3
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v1
+; TONGA-NEXT:    s_add_i32 s3, s14, s3
+; TONGA-NEXT:    s_mul_i32 s1, s7, s1
+; TONGA-NEXT:    s_add_i32 s3, s3, s1
+; TONGA-NEXT:    s_sub_i32 s1, s13, s3
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v0
+; TONGA-NEXT:    s_sub_i32 s12, s12, s14
+; TONGA-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; TONGA-NEXT:    s_subb_u32 s1, s1, s7
+; TONGA-NEXT:    s_sub_i32 s18, s12, s6
+; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s19, s1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s19, s7
+; TONGA-NEXT:    s_cselect_b32 s20, -1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s18, s6
+; TONGA-NEXT:    s_cselect_b32 s21, -1, 0
+; TONGA-NEXT:    s_cmp_eq_u32 s19, s7
+; TONGA-NEXT:    s_cselect_b32 s20, s21, s20
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s1, s1, s7
+; TONGA-NEXT:    s_sub_i32 s21, s18, s6
+; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s1, s1, 0
+; TONGA-NEXT:    s_cmp_lg_u32 s20, 0
+; TONGA-NEXT:    s_cselect_b32 s16, s21, s18
+; TONGA-NEXT:    s_cselect_b32 s1, s1, s19
+; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; TONGA-NEXT:    s_subb_u32 s3, s13, s3
+; TONGA-NEXT:    s_cmp_ge_u32 s3, s7
+; TONGA-NEXT:    s_cselect_b32 s13, -1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s12, s6
+; TONGA-NEXT:    s_cselect_b32 s6, -1, 0
+; TONGA-NEXT:    s_cmp_eq_u32 s3, s7
+; TONGA-NEXT:    s_cselect_b32 s6, s6, s13
+; TONGA-NEXT:    s_cmp_lg_u32 s6, 0
+; TONGA-NEXT:    s_cselect_b32 s7, s1, s3
+; TONGA-NEXT:    s_cselect_b32 s6, s16, s12
+; TONGA-NEXT:    s_xor_b64 s[6:7], s[6:7], s[10:11]
+; TONGA-NEXT:    s_sub_u32 s6, s6, s10
+; TONGA-NEXT:    s_subb_u32 s7, s7, s10
+; TONGA-NEXT:    s_cbranch_execnz .LBB10_4
 ; TONGA-NEXT:  .LBB10_2:
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; TONGA-NEXT:    v_sub_u32_e32 v5, vcc, 0, v0
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; TONGA-NEXT:    s_sub_i32 s1, 0, s0
 ; TONGA-NEXT:    v_mov_b32_e32 v9, 0
-; TONGA-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; TONGA-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; TONGA-NEXT:    v_mul_lo_u32 v5, v5, v1
-; TONGA-NEXT:    v_mul_hi_u32 v5, v1, v5
-; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v1, v5
-; TONGA-NEXT:    v_mul_hi_u32 v1, v4, v1
-; TONGA-NEXT:    v_mul_lo_u32 v1, v1, v0
-; TONGA-NEXT:    v_sub_u32_e32 v1, vcc, v4, v1
-; TONGA-NEXT:    v_subrev_u32_e32 v4, vcc, v0, v1
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
-; TONGA-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
-; TONGA-NEXT:    v_sub_u32_e32 v4, vcc, v1, v0
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
-; TONGA-NEXT:    v_cndmask_b32_e32 v8, v1, v4, vcc
+; TONGA-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; TONGA-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; TONGA-NEXT:    v_mul_lo_u32 v1, s1, v0
+; TONGA-NEXT:    v_mul_hi_u32 v1, v0, v1
+; TONGA-NEXT:    v_add_u32_e32 v0, vcc, v0, v1
+; TONGA-NEXT:    v_mul_hi_u32 v0, s2, v0
+; TONGA-NEXT:    v_mul_lo_u32 v0, v0, s0
+; TONGA-NEXT:    v_sub_u32_e32 v0, vcc, s2, v0
+; TONGA-NEXT:    v_subrev_u32_e32 v1, vcc, s0, v0
+; TONGA-NEXT:    v_cmp_le_u32_e32 vcc, s0, v0
+; TONGA-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; TONGA-NEXT:    v_subrev_u32_e32 v1, vcc, s0, v0
+; TONGA-NEXT:    v_cmp_le_u32_e32 vcc, s0, v0
+; TONGA-NEXT:    v_cndmask_b32_e32 v8, v0, v1, vcc
+; TONGA-NEXT:    s_branch .LBB10_5
 ; TONGA-NEXT:  .LBB10_3:
+; TONGA-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; TONGA-NEXT:    s_branch .LBB10_2
+; TONGA-NEXT:  .LBB10_4:
+; TONGA-NEXT:    v_mov_b32_e32 v9, s7
+; TONGA-NEXT:    v_mov_b32_e32 v8, s6
+; TONGA-NEXT:  .LBB10_5:
 ; TONGA-NEXT:    v_or_b32_e32 v1, v7, v3
 ; TONGA-NEXT:    v_mov_b32_e32 v0, 0
 ; TONGA-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; TONGA-NEXT:    s_cbranch_vccz .LBB10_8
-; TONGA-NEXT:  ; %bb.4:
+; TONGA-NEXT:    s_cbranch_vccz .LBB10_9
+; TONGA-NEXT:  ; %bb.6:
 ; TONGA-NEXT:    v_ashrrev_i32_e32 v0, 31, v3
 ; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v2, v0
 ; TONGA-NEXT:    v_addc_u32_e32 v3, vcc, v3, v0, vcc
@@ -3534,7 +3572,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_mul_hi_u32 v13, v15, v0
 ; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v16, v0, 0
 ; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v10, v1
-; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v1, v11
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v11, v1
 ; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v15, v1, 0
 ; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v16, v1, 0
 ; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v13, v10
@@ -3598,8 +3636,8 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_xor_b32_e32 v1, v1, v11
 ; TONGA-NEXT:    v_sub_u32_e32 v10, vcc, v0, v11
 ; TONGA-NEXT:    v_subb_u32_e32 v11, vcc, v1, v11, vcc
-; TONGA-NEXT:    s_cbranch_execnz .LBB10_6
-; TONGA-NEXT:  .LBB10_5:
+; TONGA-NEXT:    s_cbranch_execnz .LBB10_8
+; TONGA-NEXT:  .LBB10_7:
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, v2
 ; TONGA-NEXT:    v_sub_u32_e32 v1, vcc, 0, v2
 ; TONGA-NEXT:    v_mov_b32_e32 v11, 0
@@ -3618,16 +3656,13 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_subrev_u32_e32 v1, vcc, v2, v0
 ; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
 ; TONGA-NEXT:    v_cndmask_b32_e32 v10, v0, v1, vcc
-; TONGA-NEXT:  .LBB10_6:
+; TONGA-NEXT:  .LBB10_8:
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s4
 ; TONGA-NEXT:    v_mov_b32_e32 v1, s5
 ; TONGA-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
 ; TONGA-NEXT:    s_endpgm
-; TONGA-NEXT:  .LBB10_7:
-; TONGA-NEXT:    ; implicit-def: $vgpr8_vgpr9
-; TONGA-NEXT:    s_branch .LBB10_2
-; TONGA-NEXT:  .LBB10_8:
-; TONGA-NEXT:    s_branch .LBB10_5
+; TONGA-NEXT:  .LBB10_9:
+; TONGA-NEXT:    s_branch .LBB10_7
 ;
 ; EG-LABEL: srem_v2i64:
 ; EG:       ; %bb.0:
@@ -4860,629 +4895,687 @@ define amdgpu_kernel void @srem_v2i64_4(ptr addrspace(1) %out, ptr addrspace(1)
 define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %in) {
 ; GCN-LABEL: srem_v4i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x24
-; GCN-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GCN-NEXT:    v_mov_b32_e32 v16, 0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    global_load_dwordx4 v[10:13], v8, s[10:11] offset:32
-; GCN-NEXT:    global_load_dwordx4 v[14:17], v8, s[10:11]
-; GCN-NEXT:    global_load_dwordx4 v[0:3], v8, s[10:11] offset:48
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v8, s[10:11] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[0:3], v16, s[2:3] offset:48
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v16, s[2:3] offset:32
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v16, s[2:3] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v16, s[2:3]
 ; GCN-NEXT:    s_waitcnt vmcnt(3)
+; GCN-NEXT:    v_readfirstlane_b32 s3, v3
+; GCN-NEXT:    s_waitcnt vmcnt(2)
+; GCN-NEXT:    v_readfirstlane_b32 s17, v5
+; GCN-NEXT:    v_readfirstlane_b32 s16, v4
+; GCN-NEXT:    s_waitcnt vmcnt(0)
+; GCN-NEXT:    v_readfirstlane_b32 s19, v13
+; GCN-NEXT:    v_readfirstlane_b32 s18, v12
+; GCN-NEXT:    s_or_b64 s[6:7], s[18:19], s[16:17]
+; GCN-NEXT:    s_mov_b32 s6, 0
+; GCN-NEXT:    v_readfirstlane_b32 s2, v2
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    v_readfirstlane_b32 s8, v0
+; GCN-NEXT:    v_readfirstlane_b32 s13, v7
+; GCN-NEXT:    v_readfirstlane_b32 s12, v6
 ; GCN-NEXT:    v_readfirstlane_b32 s5, v11
 ; GCN-NEXT:    v_readfirstlane_b32 s4, v10
-; GCN-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NEXT:    v_readfirstlane_b32 s7, v15
-; GCN-NEXT:    v_readfirstlane_b32 s6, v14
-; GCN-NEXT:    s_or_b64 s[0:1], s[6:7], s[4:5]
-; GCN-NEXT:    s_mov_b32 s0, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_cbranch_scc0 .LBB12_13
+; GCN-NEXT:    v_readfirstlane_b32 s11, v9
+; GCN-NEXT:    v_readfirstlane_b32 s10, v8
+; GCN-NEXT:    v_readfirstlane_b32 s15, v15
+; GCN-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; GCN-NEXT:    v_readfirstlane_b32 s14, v14
+; GCN-NEXT:    s_cbranch_scc0 .LBB12_6
 ; GCN-NEXT:  ; %bb.1:
-; GCN-NEXT:    s_ashr_i32 s0, s5, 31
-; GCN-NEXT:    s_add_u32 s2, s4, s0
-; GCN-NEXT:    s_mov_b32 s1, s0
-; GCN-NEXT:    s_addc_u32 s3, s5, s0
-; GCN-NEXT:    s_xor_b64 s[12:13], s[2:3], s[0:1]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v8, s12
-; GCN-NEXT:    v_cvt_f32_u32_e32 v9, s13
-; GCN-NEXT:    s_sub_u32 s0, 0, s12
-; GCN-NEXT:    s_subb_u32 s1, 0, s13
-; GCN-NEXT:    v_madmk_f32 v8, v9, 0x4f800000, v8
-; GCN-NEXT:    v_rcp_f32_e32 v8, v8
-; GCN-NEXT:    v_mul_f32_e32 v8, 0x5f7ffffc, v8
-; GCN-NEXT:    v_mul_f32_e32 v9, 0x2f800000, v8
-; GCN-NEXT:    v_trunc_f32_e32 v9, v9
-; GCN-NEXT:    v_madmk_f32 v8, v9, 0xcf800000, v8
-; GCN-NEXT:    v_cvt_u32_f32_e32 v9, v9
-; GCN-NEXT:    v_cvt_u32_f32_e32 v8, v8
-; GCN-NEXT:    v_readfirstlane_b32 s2, v9
-; GCN-NEXT:    v_readfirstlane_b32 s3, v8
-; GCN-NEXT:    s_mul_i32 s5, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s15, s0, s3
-; GCN-NEXT:    s_mul_i32 s14, s1, s3
-; GCN-NEXT:    s_add_i32 s5, s15, s5
-; GCN-NEXT:    s_add_i32 s5, s5, s14
-; GCN-NEXT:    s_mul_i32 s16, s0, s3
-; GCN-NEXT:    s_mul_hi_u32 s14, s3, s5
-; GCN-NEXT:    s_mul_i32 s15, s3, s5
-; GCN-NEXT:    s_mul_hi_u32 s3, s3, s16
-; GCN-NEXT:    s_add_u32 s3, s3, s15
-; GCN-NEXT:    s_addc_u32 s14, 0, s14
-; GCN-NEXT:    s_mul_hi_u32 s17, s2, s16
-; GCN-NEXT:    s_mul_i32 s16, s2, s16
-; GCN-NEXT:    s_add_u32 s3, s3, s16
-; GCN-NEXT:    s_mul_hi_u32 s15, s2, s5
-; GCN-NEXT:    s_addc_u32 s3, s14, s17
-; GCN-NEXT:    s_addc_u32 s14, s15, 0
-; GCN-NEXT:    s_mul_i32 s5, s2, s5
-; GCN-NEXT:    s_add_u32 s3, s3, s5
-; GCN-NEXT:    s_addc_u32 s5, 0, s14
-; GCN-NEXT:    v_add_co_u32_e32 v8, vcc, s3, v8
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s5
-; GCN-NEXT:    v_readfirstlane_b32 s5, v8
-; GCN-NEXT:    s_mul_i32 s3, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s14, s0, s5
-; GCN-NEXT:    s_add_i32 s3, s14, s3
-; GCN-NEXT:    s_mul_i32 s1, s1, s5
-; GCN-NEXT:    s_add_i32 s3, s3, s1
-; GCN-NEXT:    s_mul_i32 s0, s0, s5
-; GCN-NEXT:    s_mul_hi_u32 s14, s2, s0
-; GCN-NEXT:    s_mul_i32 s15, s2, s0
-; GCN-NEXT:    s_mul_i32 s17, s5, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s5, s0
-; GCN-NEXT:    s_mul_hi_u32 s16, s5, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s17
-; GCN-NEXT:    s_addc_u32 s5, 0, s16
-; GCN-NEXT:    s_add_u32 s0, s0, s15
-; GCN-NEXT:    s_mul_hi_u32 s1, s2, s3
-; GCN-NEXT:    s_addc_u32 s0, s5, s14
-; GCN-NEXT:    s_addc_u32 s1, s1, 0
-; GCN-NEXT:    s_mul_i32 s3, s2, s3
-; GCN-NEXT:    s_add_u32 s0, s0, s3
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    v_add_co_u32_e32 v8, vcc, s0, v8
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_addc_u32 s2, s2, s1
-; GCN-NEXT:    s_ashr_i32 s14, s7, 31
-; GCN-NEXT:    s_add_u32 s0, s6, s14
-; GCN-NEXT:    s_mov_b32 s15, s14
-; GCN-NEXT:    s_addc_u32 s1, s7, s14
-; GCN-NEXT:    s_xor_b64 s[16:17], s[0:1], s[14:15]
-; GCN-NEXT:    v_readfirstlane_b32 s3, v8
-; GCN-NEXT:    s_mul_i32 s1, s16, s2
-; GCN-NEXT:    s_mul_hi_u32 s5, s16, s3
-; GCN-NEXT:    s_mul_hi_u32 s0, s16, s2
-; GCN-NEXT:    s_add_u32 s1, s5, s1
-; GCN-NEXT:    s_addc_u32 s0, 0, s0
-; GCN-NEXT:    s_mul_hi_u32 s7, s17, s3
-; GCN-NEXT:    s_mul_i32 s3, s17, s3
-; GCN-NEXT:    s_add_u32 s1, s1, s3
-; GCN-NEXT:    s_mul_hi_u32 s5, s17, s2
-; GCN-NEXT:    s_addc_u32 s0, s0, s7
-; GCN-NEXT:    s_addc_u32 s1, s5, 0
-; GCN-NEXT:    s_mul_i32 s2, s17, s2
-; GCN-NEXT:    s_add_u32 s0, s0, s2
-; GCN-NEXT:    s_addc_u32 s1, 0, s1
-; GCN-NEXT:    s_mul_i32 s1, s12, s1
-; GCN-NEXT:    s_mul_hi_u32 s2, s12, s0
-; GCN-NEXT:    s_add_i32 s1, s2, s1
-; GCN-NEXT:    s_mul_i32 s2, s13, s0
-; GCN-NEXT:    s_mul_i32 s0, s12, s0
-; GCN-NEXT:    s_add_i32 s5, s1, s2
-; GCN-NEXT:    v_mov_b32_e32 v8, s0
-; GCN-NEXT:    s_sub_i32 s1, s17, s5
-; GCN-NEXT:    v_sub_co_u32_e32 v8, vcc, s16, v8
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    s_subb_u32 s7, s1, s13
-; GCN-NEXT:    v_subrev_co_u32_e64 v9, s[0:1], s12, v8
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s15, s7, 0
-; GCN-NEXT:    s_cmp_ge_u32 s15, s13
-; GCN-NEXT:    s_cselect_b32 s16, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v9
-; GCN-NEXT:    s_cmp_eq_u32 s15, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v10, 0, -1, s[2:3]
-; GCN-NEXT:    v_mov_b32_e32 v11, s16
-; GCN-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    v_cndmask_b32_e64 v10, v11, v10, s[2:3]
-; GCN-NEXT:    s_subb_u32 s2, s7, s13
-; GCN-NEXT:    v_subrev_co_u32_e64 v11, s[0:1], s12, v9
-; GCN-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT:    s_subb_u32 s2, s2, 0
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v10
-; GCN-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v10, s15
-; GCN-NEXT:    v_mov_b32_e32 v11, s2
-; GCN-NEXT:    s_cmp_lg_u64 vcc, 0
-; GCN-NEXT:    v_cndmask_b32_e64 v10, v10, v11, s[0:1]
-; GCN-NEXT:    s_subb_u32 s0, s17, s5
-; GCN-NEXT:    s_cmp_ge_u32 s0, s13
-; GCN-NEXT:    s_cselect_b32 s1, -1, 0
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v8
-; GCN-NEXT:    s_cmp_eq_u32 s0, s13
-; GCN-NEXT:    v_cndmask_b32_e64 v11, 0, -1, vcc
-; GCN-NEXT:    v_mov_b32_e32 v14, s1
-; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
-; GCN-NEXT:    v_cndmask_b32_e32 v11, v14, v11, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v11
-; GCN-NEXT:    v_mov_b32_e32 v14, s0
-; GCN-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v10, v14, v10, vcc
-; GCN-NEXT:    v_xor_b32_e32 v8, s14, v8
-; GCN-NEXT:    v_xor_b32_e32 v9, s14, v10
-; GCN-NEXT:    v_mov_b32_e32 v10, s14
-; GCN-NEXT:    v_subrev_co_u32_e32 v8, vcc, s14, v8
-; GCN-NEXT:    v_subb_co_u32_e32 v9, vcc, v9, v10, vcc
+; GCN-NEXT:    s_ashr_i32 s6, s17, 31
+; GCN-NEXT:    s_add_u32 s20, s16, s6
+; GCN-NEXT:    s_mov_b32 s7, s6
+; GCN-NEXT:    s_addc_u32 s21, s17, s6
+; GCN-NEXT:    s_xor_b64 s[6:7], s[20:21], s[6:7]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s7
+; GCN-NEXT:    s_sub_u32 s17, 0, s6
+; GCN-NEXT:    s_subb_u32 s24, 0, s7
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; GCN-NEXT:    v_rcp_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s25, v1
+; GCN-NEXT:    v_readfirstlane_b32 s22, v0
+; GCN-NEXT:    s_mul_i32 s23, s17, s25
+; GCN-NEXT:    s_mul_hi_u32 s27, s17, s22
+; GCN-NEXT:    s_mul_i32 s26, s24, s22
+; GCN-NEXT:    s_add_i32 s23, s27, s23
+; GCN-NEXT:    s_add_i32 s23, s23, s26
+; GCN-NEXT:    s_mul_i32 s28, s17, s22
+; GCN-NEXT:    s_mul_i32 s27, s22, s23
+; GCN-NEXT:    s_mul_hi_u32 s29, s22, s28
+; GCN-NEXT:    s_mul_hi_u32 s26, s22, s23
+; GCN-NEXT:    s_add_u32 s27, s29, s27
+; GCN-NEXT:    s_addc_u32 s26, 0, s26
+; GCN-NEXT:    s_mul_hi_u32 s30, s25, s28
+; GCN-NEXT:    s_mul_i32 s28, s25, s28
+; GCN-NEXT:    s_add_u32 s27, s27, s28
+; GCN-NEXT:    s_mul_hi_u32 s29, s25, s23
+; GCN-NEXT:    s_addc_u32 s26, s26, s30
+; GCN-NEXT:    s_addc_u32 s27, s29, 0
+; GCN-NEXT:    s_mul_i32 s23, s25, s23
+; GCN-NEXT:    s_add_u32 s23, s26, s23
+; GCN-NEXT:    s_addc_u32 s26, 0, s27
+; GCN-NEXT:    s_add_i32 s27, s22, s23
+; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
+; GCN-NEXT:    s_addc_u32 s25, s25, s26
+; GCN-NEXT:    s_mul_i32 s22, s17, s25
+; GCN-NEXT:    s_mul_hi_u32 s23, s17, s27
+; GCN-NEXT:    s_add_i32 s22, s23, s22
+; GCN-NEXT:    s_mul_i32 s24, s24, s27
+; GCN-NEXT:    s_add_i32 s22, s22, s24
+; GCN-NEXT:    s_mul_i32 s17, s17, s27
+; GCN-NEXT:    s_mul_hi_u32 s24, s25, s17
+; GCN-NEXT:    s_mul_i32 s26, s25, s17
+; GCN-NEXT:    s_mul_i32 s29, s27, s22
+; GCN-NEXT:    s_mul_hi_u32 s17, s27, s17
+; GCN-NEXT:    s_mul_hi_u32 s28, s27, s22
+; GCN-NEXT:    s_add_u32 s17, s17, s29
+; GCN-NEXT:    s_addc_u32 s28, 0, s28
+; GCN-NEXT:    s_add_u32 s17, s17, s26
+; GCN-NEXT:    s_mul_hi_u32 s23, s25, s22
+; GCN-NEXT:    s_addc_u32 s17, s28, s24
+; GCN-NEXT:    s_addc_u32 s23, s23, 0
+; GCN-NEXT:    s_mul_i32 s22, s25, s22
+; GCN-NEXT:    s_add_u32 s17, s17, s22
+; GCN-NEXT:    s_addc_u32 s24, 0, s23
+; GCN-NEXT:    s_add_i32 s27, s27, s17
+; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
+; GCN-NEXT:    s_addc_u32 s17, s25, s24
+; GCN-NEXT:    s_ashr_i32 s22, s19, 31
+; GCN-NEXT:    s_add_u32 s24, s18, s22
+; GCN-NEXT:    s_mov_b32 s23, s22
+; GCN-NEXT:    s_addc_u32 s25, s19, s22
+; GCN-NEXT:    s_xor_b64 s[24:25], s[24:25], s[22:23]
+; GCN-NEXT:    s_mul_i32 s26, s24, s17
+; GCN-NEXT:    s_mul_hi_u32 s28, s24, s27
+; GCN-NEXT:    s_mul_hi_u32 s19, s24, s17
+; GCN-NEXT:    s_add_u32 s26, s28, s26
+; GCN-NEXT:    s_addc_u32 s19, 0, s19
+; GCN-NEXT:    s_mul_hi_u32 s29, s25, s27
+; GCN-NEXT:    s_mul_i32 s27, s25, s27
+; GCN-NEXT:    s_add_u32 s26, s26, s27
+; GCN-NEXT:    s_mul_hi_u32 s28, s25, s17
+; GCN-NEXT:    s_addc_u32 s19, s19, s29
+; GCN-NEXT:    s_addc_u32 s26, s28, 0
+; GCN-NEXT:    s_mul_i32 s17, s25, s17
+; GCN-NEXT:    s_add_u32 s17, s19, s17
+; GCN-NEXT:    s_addc_u32 s19, 0, s26
+; GCN-NEXT:    s_mul_i32 s19, s6, s19
+; GCN-NEXT:    s_mul_hi_u32 s26, s6, s17
+; GCN-NEXT:    s_add_i32 s19, s26, s19
+; GCN-NEXT:    s_mul_i32 s26, s7, s17
+; GCN-NEXT:    s_add_i32 s19, s19, s26
+; GCN-NEXT:    s_sub_i32 s28, s25, s19
+; GCN-NEXT:    s_mul_i32 s17, s6, s17
+; GCN-NEXT:    s_sub_i32 s17, s24, s17
+; GCN-NEXT:    s_cselect_b64 s[26:27], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
+; GCN-NEXT:    s_subb_u32 s24, s28, s7
+; GCN-NEXT:    s_sub_i32 s30, s17, s6
+; GCN-NEXT:    s_cselect_b64 s[28:29], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[28:29], 0
+; GCN-NEXT:    s_subb_u32 s31, s24, 0
+; GCN-NEXT:    s_cmp_ge_u32 s31, s7
+; GCN-NEXT:    s_cselect_b32 s33, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s30, s6
+; GCN-NEXT:    s_cselect_b32 s34, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s31, s7
+; GCN-NEXT:    s_cselect_b32 s33, s34, s33
+; GCN-NEXT:    s_cmp_lg_u64 s[28:29], 0
+; GCN-NEXT:    s_subb_u32 s24, s24, s7
+; GCN-NEXT:    s_sub_i32 s34, s30, s6
+; GCN-NEXT:    s_cselect_b64 s[28:29], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[28:29], 0
+; GCN-NEXT:    s_subb_u32 s24, s24, 0
+; GCN-NEXT:    s_cmp_lg_u32 s33, 0
+; GCN-NEXT:    s_cselect_b32 s28, s34, s30
+; GCN-NEXT:    s_cselect_b32 s24, s24, s31
+; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
+; GCN-NEXT:    s_subb_u32 s19, s25, s19
+; GCN-NEXT:    s_cmp_ge_u32 s19, s7
+; GCN-NEXT:    s_cselect_b32 s25, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s17, s6
+; GCN-NEXT:    s_cselect_b32 s6, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s19, s7
+; GCN-NEXT:    s_cselect_b32 s6, s6, s25
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_cselect_b32 s7, s24, s19
+; GCN-NEXT:    s_cselect_b32 s6, s28, s17
+; GCN-NEXT:    s_xor_b64 s[6:7], s[6:7], s[22:23]
+; GCN-NEXT:    s_sub_u32 s6, s6, s22
+; GCN-NEXT:    s_subb_u32 s7, s7, s22
 ; GCN-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-NEXT:  .LBB12_2:
-; GCN-NEXT:    v_cvt_f32_u32_e32 v8, s4
-; GCN-NEXT:    s_sub_i32 s0, 0, s4
-; GCN-NEXT:    s_mov_b32 s1, 0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v8, v8
-; GCN-NEXT:    v_mul_f32_e32 v8, 0x4f7ffffe, v8
-; GCN-NEXT:    v_cvt_u32_f32_e32 v8, v8
-; GCN-NEXT:    v_readfirstlane_b32 s2, v8
-; GCN-NEXT:    s_mul_i32 s0, s0, s2
-; GCN-NEXT:    s_mul_hi_u32 s0, s2, s0
-; GCN-NEXT:    s_add_i32 s2, s2, s0
-; GCN-NEXT:    s_mul_hi_u32 s0, s6, s2
-; GCN-NEXT:    s_mul_i32 s0, s0, s4
-; GCN-NEXT:    s_sub_i32 s0, s6, s0
-; GCN-NEXT:    s_sub_i32 s2, s0, s4
-; GCN-NEXT:    s_cmp_ge_u32 s0, s4
-; GCN-NEXT:    s_cselect_b32 s0, s2, s0
-; GCN-NEXT:    s_sub_i32 s2, s0, s4
-; GCN-NEXT:    s_cmp_ge_u32 s0, s4
-; GCN-NEXT:    s_cselect_b32 s0, s2, s0
-; GCN-NEXT:    v_mov_b32_e32 v9, s1
-; GCN-NEXT:    v_mov_b32_e32 v8, s0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s16
+; GCN-NEXT:    s_sub_i32 s6, 0, s16
+; GCN-NEXT:    s_mov_b32 s7, 0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s17, v0
+; GCN-NEXT:    s_mul_i32 s6, s6, s17
+; GCN-NEXT:    s_mul_hi_u32 s6, s17, s6
+; GCN-NEXT:    s_add_i32 s17, s17, s6
+; GCN-NEXT:    s_mul_hi_u32 s6, s18, s17
+; GCN-NEXT:    s_mul_i32 s6, s6, s16
+; GCN-NEXT:    s_sub_i32 s6, s18, s6
+; GCN-NEXT:    s_sub_i32 s17, s6, s16
+; GCN-NEXT:    s_cmp_ge_u32 s6, s16
+; GCN-NEXT:    s_cselect_b32 s6, s17, s6
+; GCN-NEXT:    s_sub_i32 s17, s6, s16
+; GCN-NEXT:    s_cmp_ge_u32 s6, s16
+; GCN-NEXT:    s_cselect_b32 s6, s17, s6
 ; GCN-NEXT:  .LBB12_3:
-; GCN-NEXT:    v_or_b32_e32 v11, v17, v13
-; GCN-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
-; GCN-NEXT:    s_cbranch_vccz .LBB12_14
+; GCN-NEXT:    s_or_b64 s[16:17], s[14:15], s[12:13]
+; GCN-NEXT:    s_mov_b32 s16, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_cbranch_scc0 .LBB12_7
 ; GCN-NEXT:  ; %bb.4:
-; GCN-NEXT:    v_ashrrev_i32_e32 v10, 31, v13
-; GCN-NEXT:    v_add_co_u32_e32 v11, vcc, v12, v10
-; GCN-NEXT:    v_addc_co_u32_e32 v13, vcc, v13, v10, vcc
-; GCN-NEXT:    v_xor_b32_e32 v11, v11, v10
-; GCN-NEXT:    v_xor_b32_e32 v10, v13, v10
-; GCN-NEXT:    v_cvt_f32_u32_e32 v13, v11
-; GCN-NEXT:    v_cvt_f32_u32_e32 v14, v10
-; GCN-NEXT:    v_sub_co_u32_e32 v15, vcc, 0, v11
-; GCN-NEXT:    v_subb_co_u32_e32 v18, vcc, 0, v10, vcc
-; GCN-NEXT:    v_madmk_f32 v13, v14, 0x4f800000, v13
-; GCN-NEXT:    v_rcp_f32_e32 v13, v13
-; GCN-NEXT:    v_mul_f32_e32 v13, 0x5f7ffffc, v13
-; GCN-NEXT:    v_mul_f32_e32 v14, 0x2f800000, v13
-; GCN-NEXT:    v_trunc_f32_e32 v14, v14
-; GCN-NEXT:    v_madmk_f32 v13, v14, 0xcf800000, v13
-; GCN-NEXT:    v_cvt_u32_f32_e32 v14, v14
-; GCN-NEXT:    v_cvt_u32_f32_e32 v13, v13
-; GCN-NEXT:    v_mul_lo_u32 v20, v15, v14
-; GCN-NEXT:    v_mul_hi_u32 v19, v15, v13
-; GCN-NEXT:    v_mul_lo_u32 v21, v18, v13
-; GCN-NEXT:    v_mul_lo_u32 v22, v15, v13
-; GCN-NEXT:    v_add_u32_e32 v19, v19, v20
-; GCN-NEXT:    v_add_u32_e32 v19, v19, v21
-; GCN-NEXT:    v_mul_lo_u32 v20, v13, v19
-; GCN-NEXT:    v_mul_hi_u32 v21, v13, v22
-; GCN-NEXT:    v_mul_hi_u32 v23, v13, v19
-; GCN-NEXT:    v_mul_hi_u32 v24, v14, v19
-; GCN-NEXT:    v_mul_lo_u32 v19, v14, v19
-; GCN-NEXT:    v_add_co_u32_e32 v20, vcc, v21, v20
-; GCN-NEXT:    v_addc_co_u32_e32 v21, vcc, 0, v23, vcc
-; GCN-NEXT:    v_mul_lo_u32 v23, v14, v22
-; GCN-NEXT:    v_mul_hi_u32 v22, v14, v22
-; GCN-NEXT:    v_add_co_u32_e32 v20, vcc, v20, v23
-; GCN-NEXT:    v_addc_co_u32_e32 v20, vcc, v21, v22, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v21, vcc, 0, v24, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v19, vcc, v20, v19
-; GCN-NEXT:    v_addc_co_u32_e32 v20, vcc, 0, v21, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v13, vcc, v13, v19
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, v14, v20, vcc
-; GCN-NEXT:    v_mul_lo_u32 v19, v15, v14
-; GCN-NEXT:    v_mul_hi_u32 v20, v15, v13
-; GCN-NEXT:    v_mul_lo_u32 v18, v18, v13
-; GCN-NEXT:    v_mul_lo_u32 v15, v15, v13
-; GCN-NEXT:    v_add_u32_e32 v19, v20, v19
-; GCN-NEXT:    v_add_u32_e32 v18, v19, v18
-; GCN-NEXT:    v_mul_lo_u32 v21, v13, v18
-; GCN-NEXT:    v_mul_hi_u32 v22, v13, v15
-; GCN-NEXT:    v_mul_hi_u32 v23, v13, v18
-; GCN-NEXT:    v_mul_hi_u32 v20, v14, v15
-; GCN-NEXT:    v_mul_lo_u32 v15, v14, v15
-; GCN-NEXT:    v_mul_hi_u32 v19, v14, v18
-; GCN-NEXT:    v_add_co_u32_e32 v21, vcc, v22, v21
-; GCN-NEXT:    v_addc_co_u32_e32 v22, vcc, 0, v23, vcc
-; GCN-NEXT:    v_mul_lo_u32 v18, v14, v18
-; GCN-NEXT:    v_add_co_u32_e32 v15, vcc, v21, v15
-; GCN-NEXT:    v_addc_co_u32_e32 v15, vcc, v22, v20, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v15, vcc, v15, v18
-; GCN-NEXT:    v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v13, vcc, v13, v15
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, v14, v18, vcc
-; GCN-NEXT:    v_ashrrev_i32_e32 v15, 31, v17
-; GCN-NEXT:    v_add_co_u32_e32 v18, vcc, v16, v15
-; GCN-NEXT:    v_xor_b32_e32 v18, v18, v15
-; GCN-NEXT:    v_mul_lo_u32 v19, v18, v14
-; GCN-NEXT:    v_mul_hi_u32 v20, v18, v13
-; GCN-NEXT:    v_mul_hi_u32 v21, v18, v14
-; GCN-NEXT:    v_addc_co_u32_e32 v17, vcc, v17, v15, vcc
-; GCN-NEXT:    v_xor_b32_e32 v17, v17, v15
-; GCN-NEXT:    v_add_co_u32_e32 v19, vcc, v20, v19
-; GCN-NEXT:    v_addc_co_u32_e32 v20, vcc, 0, v21, vcc
-; GCN-NEXT:    v_mul_lo_u32 v21, v17, v13
-; GCN-NEXT:    v_mul_hi_u32 v13, v17, v13
-; GCN-NEXT:    v_mul_hi_u32 v22, v17, v14
-; GCN-NEXT:    v_mul_lo_u32 v14, v17, v14
-; GCN-NEXT:    v_add_co_u32_e32 v19, vcc, v19, v21
-; GCN-NEXT:    v_addc_co_u32_e32 v13, vcc, v20, v13, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v19, vcc, 0, v22, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v13, vcc, v13, v14
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, 0, v19, vcc
-; GCN-NEXT:    v_mul_lo_u32 v14, v11, v14
-; GCN-NEXT:    v_mul_hi_u32 v19, v11, v13
-; GCN-NEXT:    v_mul_lo_u32 v20, v10, v13
-; GCN-NEXT:    v_mul_lo_u32 v13, v11, v13
-; GCN-NEXT:    v_add_u32_e32 v14, v19, v14
-; GCN-NEXT:    v_add_u32_e32 v14, v14, v20
-; GCN-NEXT:    v_sub_u32_e32 v19, v17, v14
-; GCN-NEXT:    v_sub_co_u32_e32 v13, vcc, v18, v13
-; GCN-NEXT:    v_subb_co_u32_e64 v18, s[0:1], v19, v10, vcc
-; GCN-NEXT:    v_sub_co_u32_e64 v19, s[0:1], v13, v11
-; GCN-NEXT:    v_subbrev_co_u32_e64 v20, s[2:3], 0, v18, s[0:1]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v20, v10
-; GCN-NEXT:    v_cndmask_b32_e64 v21, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v19, v11
-; GCN-NEXT:    v_subb_co_u32_e32 v14, vcc, v17, v14, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v22, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], v20, v10
-; GCN-NEXT:    v_subb_co_u32_e64 v18, s[0:1], v18, v10, s[0:1]
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v14, v10
-; GCN-NEXT:    v_cndmask_b32_e64 v21, v21, v22, s[2:3]
-; GCN-NEXT:    v_sub_co_u32_e64 v22, s[0:1], v19, v11
-; GCN-NEXT:    v_cndmask_b32_e64 v17, 0, -1, vcc
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v13, v11
-; GCN-NEXT:    v_subbrev_co_u32_e64 v18, s[0:1], 0, v18, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v11, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v14, v10
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v21
-; GCN-NEXT:    v_cndmask_b32_e32 v10, v17, v11, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v19, v19, v22, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v10
-; GCN-NEXT:    v_cndmask_b32_e64 v18, v20, v18, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v11, v13, v19, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v10, v14, v18, vcc
-; GCN-NEXT:    v_xor_b32_e32 v11, v11, v15
-; GCN-NEXT:    v_xor_b32_e32 v13, v10, v15
-; GCN-NEXT:    v_sub_co_u32_e32 v10, vcc, v11, v15
-; GCN-NEXT:    v_subb_co_u32_e32 v11, vcc, v13, v15, vcc
-; GCN-NEXT:    s_cbranch_execnz .LBB12_6
+; GCN-NEXT:    s_ashr_i32 s16, s13, 31
+; GCN-NEXT:    s_add_u32 s18, s12, s16
+; GCN-NEXT:    s_mov_b32 s17, s16
+; GCN-NEXT:    s_addc_u32 s19, s13, s16
+; GCN-NEXT:    s_xor_b64 s[18:19], s[18:19], s[16:17]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s18
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s19
+; GCN-NEXT:    s_sub_u32 s13, 0, s18
+; GCN-NEXT:    s_subb_u32 s22, 0, s19
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; GCN-NEXT:    v_rcp_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s23, v1
+; GCN-NEXT:    v_readfirstlane_b32 s20, v0
+; GCN-NEXT:    s_mul_i32 s21, s13, s23
+; GCN-NEXT:    s_mul_hi_u32 s25, s13, s20
+; GCN-NEXT:    s_mul_i32 s24, s22, s20
+; GCN-NEXT:    s_add_i32 s21, s25, s21
+; GCN-NEXT:    s_add_i32 s21, s21, s24
+; GCN-NEXT:    s_mul_i32 s26, s13, s20
+; GCN-NEXT:    s_mul_i32 s25, s20, s21
+; GCN-NEXT:    s_mul_hi_u32 s27, s20, s26
+; GCN-NEXT:    s_mul_hi_u32 s24, s20, s21
+; GCN-NEXT:    s_add_u32 s25, s27, s25
+; GCN-NEXT:    s_addc_u32 s24, 0, s24
+; GCN-NEXT:    s_mul_hi_u32 s28, s23, s26
+; GCN-NEXT:    s_mul_i32 s26, s23, s26
+; GCN-NEXT:    s_add_u32 s25, s25, s26
+; GCN-NEXT:    s_mul_hi_u32 s27, s23, s21
+; GCN-NEXT:    s_addc_u32 s24, s24, s28
+; GCN-NEXT:    s_addc_u32 s25, s27, 0
+; GCN-NEXT:    s_mul_i32 s21, s23, s21
+; GCN-NEXT:    s_add_u32 s21, s24, s21
+; GCN-NEXT:    s_addc_u32 s24, 0, s25
+; GCN-NEXT:    s_add_i32 s25, s20, s21
+; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_addc_u32 s23, s23, s24
+; GCN-NEXT:    s_mul_i32 s20, s13, s23
+; GCN-NEXT:    s_mul_hi_u32 s21, s13, s25
+; GCN-NEXT:    s_add_i32 s20, s21, s20
+; GCN-NEXT:    s_mul_i32 s22, s22, s25
+; GCN-NEXT:    s_add_i32 s20, s20, s22
+; GCN-NEXT:    s_mul_i32 s13, s13, s25
+; GCN-NEXT:    s_mul_hi_u32 s22, s23, s13
+; GCN-NEXT:    s_mul_i32 s24, s23, s13
+; GCN-NEXT:    s_mul_i32 s27, s25, s20
+; GCN-NEXT:    s_mul_hi_u32 s13, s25, s13
+; GCN-NEXT:    s_mul_hi_u32 s26, s25, s20
+; GCN-NEXT:    s_add_u32 s13, s13, s27
+; GCN-NEXT:    s_addc_u32 s26, 0, s26
+; GCN-NEXT:    s_add_u32 s13, s13, s24
+; GCN-NEXT:    s_mul_hi_u32 s21, s23, s20
+; GCN-NEXT:    s_addc_u32 s13, s26, s22
+; GCN-NEXT:    s_addc_u32 s21, s21, 0
+; GCN-NEXT:    s_mul_i32 s20, s23, s20
+; GCN-NEXT:    s_add_u32 s13, s13, s20
+; GCN-NEXT:    s_addc_u32 s22, 0, s21
+; GCN-NEXT:    s_add_i32 s25, s25, s13
+; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_addc_u32 s13, s23, s22
+; GCN-NEXT:    s_ashr_i32 s20, s15, 31
+; GCN-NEXT:    s_add_u32 s22, s14, s20
+; GCN-NEXT:    s_mov_b32 s21, s20
+; GCN-NEXT:    s_addc_u32 s23, s15, s20
+; GCN-NEXT:    s_xor_b64 s[22:23], s[22:23], s[20:21]
+; GCN-NEXT:    s_mul_i32 s24, s22, s13
+; GCN-NEXT:    s_mul_hi_u32 s26, s22, s25
+; GCN-NEXT:    s_mul_hi_u32 s15, s22, s13
+; GCN-NEXT:    s_add_u32 s24, s26, s24
+; GCN-NEXT:    s_addc_u32 s15, 0, s15
+; GCN-NEXT:    s_mul_hi_u32 s27, s23, s25
+; GCN-NEXT:    s_mul_i32 s25, s23, s25
+; GCN-NEXT:    s_add_u32 s24, s24, s25
+; GCN-NEXT:    s_mul_hi_u32 s26, s23, s13
+; GCN-NEXT:    s_addc_u32 s15, s15, s27
+; GCN-NEXT:    s_addc_u32 s24, s26, 0
+; GCN-NEXT:    s_mul_i32 s13, s23, s13
+; GCN-NEXT:    s_add_u32 s13, s15, s13
+; GCN-NEXT:    s_addc_u32 s15, 0, s24
+; GCN-NEXT:    s_mul_i32 s15, s18, s15
+; GCN-NEXT:    s_mul_hi_u32 s24, s18, s13
+; GCN-NEXT:    s_add_i32 s15, s24, s15
+; GCN-NEXT:    s_mul_i32 s24, s19, s13
+; GCN-NEXT:    s_add_i32 s15, s15, s24
+; GCN-NEXT:    s_sub_i32 s26, s23, s15
+; GCN-NEXT:    s_mul_i32 s13, s18, s13
+; GCN-NEXT:    s_sub_i32 s13, s22, s13
+; GCN-NEXT:    s_cselect_b64 s[24:25], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[24:25], 0
+; GCN-NEXT:    s_subb_u32 s22, s26, s19
+; GCN-NEXT:    s_sub_i32 s28, s13, s18
+; GCN-NEXT:    s_cselect_b64 s[26:27], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
+; GCN-NEXT:    s_subb_u32 s29, s22, 0
+; GCN-NEXT:    s_cmp_ge_u32 s29, s19
+; GCN-NEXT:    s_cselect_b32 s30, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s28, s18
+; GCN-NEXT:    s_cselect_b32 s31, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s29, s19
+; GCN-NEXT:    s_cselect_b32 s30, s31, s30
+; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
+; GCN-NEXT:    s_subb_u32 s22, s22, s19
+; GCN-NEXT:    s_sub_i32 s31, s28, s18
+; GCN-NEXT:    s_cselect_b64 s[26:27], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
+; GCN-NEXT:    s_subb_u32 s22, s22, 0
+; GCN-NEXT:    s_cmp_lg_u32 s30, 0
+; GCN-NEXT:    s_cselect_b32 s26, s31, s28
+; GCN-NEXT:    s_cselect_b32 s22, s22, s29
+; GCN-NEXT:    s_cmp_lg_u64 s[24:25], 0
+; GCN-NEXT:    s_subb_u32 s15, s23, s15
+; GCN-NEXT:    s_cmp_ge_u32 s15, s19
+; GCN-NEXT:    s_cselect_b32 s23, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s18
+; GCN-NEXT:    s_cselect_b32 s18, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s15, s19
+; GCN-NEXT:    s_cselect_b32 s18, s18, s23
+; GCN-NEXT:    s_cmp_lg_u32 s18, 0
+; GCN-NEXT:    s_cselect_b32 s19, s22, s15
+; GCN-NEXT:    s_cselect_b32 s18, s26, s13
+; GCN-NEXT:    s_xor_b64 s[18:19], s[18:19], s[20:21]
+; GCN-NEXT:    s_sub_u32 s18, s18, s20
+; GCN-NEXT:    s_subb_u32 s19, s19, s20
+; GCN-NEXT:    s_cbranch_execnz .LBB12_8
 ; GCN-NEXT:  .LBB12_5:
-; GCN-NEXT:    v_cvt_f32_u32_e32 v10, v12
-; GCN-NEXT:    v_sub_u32_e32 v11, 0, v12
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v10, v10
-; GCN-NEXT:    v_mul_f32_e32 v10, 0x4f7ffffe, v10
-; GCN-NEXT:    v_cvt_u32_f32_e32 v10, v10
-; GCN-NEXT:    v_mul_lo_u32 v11, v11, v10
-; GCN-NEXT:    v_mul_hi_u32 v11, v10, v11
-; GCN-NEXT:    v_add_u32_e32 v10, v10, v11
-; GCN-NEXT:    v_mul_hi_u32 v10, v16, v10
-; GCN-NEXT:    v_mul_lo_u32 v10, v10, v12
-; GCN-NEXT:    v_sub_u32_e32 v10, v16, v10
-; GCN-NEXT:    v_sub_u32_e32 v11, v10, v12
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v10, v12
-; GCN-NEXT:    v_cndmask_b32_e32 v10, v10, v11, vcc
-; GCN-NEXT:    v_sub_u32_e32 v11, v10, v12
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v10, v12
-; GCN-NEXT:    v_cndmask_b32_e32 v10, v10, v11, vcc
-; GCN-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s12
+; GCN-NEXT:    s_sub_i32 s13, 0, s12
+; GCN-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s13, v0
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    v_add_u32_e32 v0, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s14, v0
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s12
+; GCN-NEXT:    v_sub_u32_e32 v0, s14, v0
+; GCN-NEXT:    v_subrev_u32_e32 v1, s12, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    v_subrev_u32_e32 v1, s12, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GCN-NEXT:    s_branch .LBB12_9
 ; GCN-NEXT:  .LBB12_6:
-; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_or_b32_e32 v13, v5, v1
-; GCN-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; GCN-NEXT:    s_cbranch_vccz .LBB12_15
-; GCN-NEXT:  ; %bb.7:
-; GCN-NEXT:    v_ashrrev_i32_e32 v13, 31, v1
-; GCN-NEXT:    v_add_co_u32_e32 v12, vcc, v0, v13
-; GCN-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v13, vcc
-; GCN-NEXT:    v_xor_b32_e32 v12, v12, v13
-; GCN-NEXT:    v_xor_b32_e32 v1, v1, v13
-; GCN-NEXT:    v_cvt_f32_u32_e32 v13, v12
-; GCN-NEXT:    v_cvt_f32_u32_e32 v14, v1
-; GCN-NEXT:    v_sub_co_u32_e32 v15, vcc, 0, v12
-; GCN-NEXT:    v_subb_co_u32_e32 v16, vcc, 0, v1, vcc
-; GCN-NEXT:    v_madmk_f32 v13, v14, 0x4f800000, v13
-; GCN-NEXT:    v_rcp_f32_e32 v13, v13
-; GCN-NEXT:    v_mul_f32_e32 v13, 0x5f7ffffc, v13
-; GCN-NEXT:    v_mul_f32_e32 v14, 0x2f800000, v13
-; GCN-NEXT:    v_trunc_f32_e32 v14, v14
-; GCN-NEXT:    v_madmk_f32 v13, v14, 0xcf800000, v13
-; GCN-NEXT:    v_cvt_u32_f32_e32 v14, v14
-; GCN-NEXT:    v_cvt_u32_f32_e32 v13, v13
-; GCN-NEXT:    v_mul_lo_u32 v18, v15, v14
-; GCN-NEXT:    v_mul_hi_u32 v17, v15, v13
-; GCN-NEXT:    v_mul_lo_u32 v19, v16, v13
-; GCN-NEXT:    v_mul_lo_u32 v20, v15, v13
-; GCN-NEXT:    v_add_u32_e32 v17, v17, v18
-; GCN-NEXT:    v_add_u32_e32 v17, v17, v19
-; GCN-NEXT:    v_mul_lo_u32 v18, v13, v17
-; GCN-NEXT:    v_mul_hi_u32 v19, v13, v20
-; GCN-NEXT:    v_mul_hi_u32 v21, v13, v17
-; GCN-NEXT:    v_mul_hi_u32 v22, v14, v17
-; GCN-NEXT:    v_mul_lo_u32 v17, v14, v17
-; GCN-NEXT:    v_add_co_u32_e32 v18, vcc, v19, v18
-; GCN-NEXT:    v_addc_co_u32_e32 v19, vcc, 0, v21, vcc
-; GCN-NEXT:    v_mul_lo_u32 v21, v14, v20
-; GCN-NEXT:    v_mul_hi_u32 v20, v14, v20
-; GCN-NEXT:    v_add_co_u32_e32 v18, vcc, v18, v21
-; GCN-NEXT:    v_addc_co_u32_e32 v18, vcc, v19, v20, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v19, vcc, 0, v22, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v17, vcc, v18, v17
-; GCN-NEXT:    v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v13, vcc, v13, v17
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, v14, v18, vcc
-; GCN-NEXT:    v_mul_lo_u32 v17, v15, v14
-; GCN-NEXT:    v_mul_hi_u32 v18, v15, v13
-; GCN-NEXT:    v_mul_lo_u32 v16, v16, v13
-; GCN-NEXT:    v_mul_lo_u32 v15, v15, v13
-; GCN-NEXT:    v_add_u32_e32 v17, v18, v17
-; GCN-NEXT:    v_add_u32_e32 v16, v17, v16
-; GCN-NEXT:    v_mul_lo_u32 v19, v13, v16
-; GCN-NEXT:    v_mul_hi_u32 v20, v13, v15
-; GCN-NEXT:    v_mul_hi_u32 v21, v13, v16
-; GCN-NEXT:    v_mul_hi_u32 v18, v14, v15
-; GCN-NEXT:    v_mul_lo_u32 v15, v14, v15
-; GCN-NEXT:    v_mul_hi_u32 v17, v14, v16
-; GCN-NEXT:    v_add_co_u32_e32 v19, vcc, v20, v19
-; GCN-NEXT:    v_addc_co_u32_e32 v20, vcc, 0, v21, vcc
-; GCN-NEXT:    v_mul_lo_u32 v16, v14, v16
-; GCN-NEXT:    v_add_co_u32_e32 v15, vcc, v19, v15
-; GCN-NEXT:    v_addc_co_u32_e32 v15, vcc, v20, v18, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v15, vcc, v15, v16
-; GCN-NEXT:    v_addc_co_u32_e32 v16, vcc, 0, v17, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v13, vcc, v13, v15
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, v14, v16, vcc
-; GCN-NEXT:    v_ashrrev_i32_e32 v15, 31, v5
-; GCN-NEXT:    v_add_co_u32_e32 v16, vcc, v4, v15
-; GCN-NEXT:    v_xor_b32_e32 v16, v16, v15
-; GCN-NEXT:    v_mul_lo_u32 v17, v16, v14
-; GCN-NEXT:    v_mul_hi_u32 v18, v16, v13
-; GCN-NEXT:    v_mul_hi_u32 v19, v16, v14
-; GCN-NEXT:    v_addc_co_u32_e32 v5, vcc, v5, v15, vcc
-; GCN-NEXT:    v_xor_b32_e32 v5, v5, v15
-; GCN-NEXT:    v_add_co_u32_e32 v17, vcc, v18, v17
-; GCN-NEXT:    v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
-; GCN-NEXT:    v_mul_lo_u32 v19, v5, v13
-; GCN-NEXT:    v_mul_hi_u32 v13, v5, v13
-; GCN-NEXT:    v_mul_hi_u32 v20, v5, v14
-; GCN-NEXT:    v_mul_lo_u32 v14, v5, v14
-; GCN-NEXT:    v_add_co_u32_e32 v17, vcc, v17, v19
-; GCN-NEXT:    v_addc_co_u32_e32 v13, vcc, v18, v13, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v17, vcc, 0, v20, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v13, vcc, v13, v14
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, 0, v17, vcc
-; GCN-NEXT:    v_mul_lo_u32 v14, v12, v14
-; GCN-NEXT:    v_mul_hi_u32 v17, v12, v13
-; GCN-NEXT:    v_mul_lo_u32 v18, v1, v13
-; GCN-NEXT:    v_mul_lo_u32 v13, v12, v13
-; GCN-NEXT:    v_add_u32_e32 v14, v17, v14
-; GCN-NEXT:    v_add_u32_e32 v14, v14, v18
-; GCN-NEXT:    v_sub_u32_e32 v17, v5, v14
-; GCN-NEXT:    v_sub_co_u32_e32 v13, vcc, v16, v13
-; GCN-NEXT:    v_subb_co_u32_e64 v16, s[0:1], v17, v1, vcc
-; GCN-NEXT:    v_sub_co_u32_e64 v17, s[0:1], v13, v12
-; GCN-NEXT:    v_subbrev_co_u32_e64 v18, s[2:3], 0, v16, s[0:1]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v18, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v19, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v17, v12
-; GCN-NEXT:    v_subb_co_u32_e32 v5, vcc, v5, v14, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v20, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], v18, v1
-; GCN-NEXT:    v_subb_co_u32_e64 v16, s[0:1], v16, v1, s[0:1]
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v19, v19, v20, s[2:3]
-; GCN-NEXT:    v_sub_co_u32_e64 v20, s[0:1], v17, v12
-; GCN-NEXT:    v_cndmask_b32_e64 v14, 0, -1, vcc
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v13, v12
-; GCN-NEXT:    v_subbrev_co_u32_e64 v16, s[0:1], 0, v16, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v12, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v1
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v19
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v14, v12, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v17, v17, v20, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v16, v18, v16, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v5, v16, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v5, v13, v17, vcc
-; GCN-NEXT:    v_xor_b32_e32 v5, v5, v15
-; GCN-NEXT:    v_xor_b32_e32 v1, v1, v15
-; GCN-NEXT:    v_sub_co_u32_e32 v12, vcc, v5, v15
-; GCN-NEXT:    v_subb_co_u32_e32 v13, vcc, v1, v15, vcc
-; GCN-NEXT:    s_cbranch_execnz .LBB12_9
+; GCN-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; GCN-NEXT:    s_branch .LBB12_2
+; GCN-NEXT:  .LBB12_7:
+; GCN-NEXT:    ; implicit-def: $sgpr18_sgpr19
+; GCN-NEXT:    s_branch .LBB12_5
 ; GCN-NEXT:  .LBB12_8:
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, v0
-; GCN-NEXT:    v_sub_u32_e32 v5, 0, v0
-; GCN-NEXT:    v_mov_b32_e32 v13, 0
-; GCN-NEXT:    v_rcp_iflag_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_f32_e32 v1, 0x4f7ffffe, v1
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_lo_u32 v5, v5, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v5
-; GCN-NEXT:    v_add_u32_e32 v1, v1, v5
-; GCN-NEXT:    v_mul_hi_u32 v1, v4, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, v1, v0
-; GCN-NEXT:    v_sub_u32_e32 v1, v4, v1
-; GCN-NEXT:    v_sub_u32_e32 v4, v1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
-; GCN-NEXT:    v_sub_u32_e32 v4, v1, v0
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
-; GCN-NEXT:    v_cndmask_b32_e32 v12, v1, v4, vcc
+; GCN-NEXT:    v_mov_b32_e32 v2, s18
+; GCN-NEXT:    v_mov_b32_e32 v3, s19
 ; GCN-NEXT:  .LBB12_9:
-; GCN-NEXT:    v_or_b32_e32 v1, v7, v3
-; GCN-NEXT:    v_mov_b32_e32 v0, 0
-; GCN-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GCN-NEXT:    s_cbranch_vccz .LBB12_16
+; GCN-NEXT:    s_or_b64 s[12:13], s[10:11], s[8:9]
+; GCN-NEXT:    s_mov_b32 s12, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GCN-NEXT:    s_cbranch_scc0 .LBB12_12
 ; GCN-NEXT:  ; %bb.10:
-; GCN-NEXT:    v_ashrrev_i32_e32 v0, 31, v3
-; GCN-NEXT:    v_add_co_u32_e32 v1, vcc, v2, v0
-; GCN-NEXT:    v_addc_co_u32_e32 v3, vcc, v3, v0, vcc
-; GCN-NEXT:    v_xor_b32_e32 v1, v1, v0
-; GCN-NEXT:    v_xor_b32_e32 v0, v3, v0
-; GCN-NEXT:    v_cvt_f32_u32_e32 v3, v1
-; GCN-NEXT:    v_cvt_f32_u32_e32 v4, v0
-; GCN-NEXT:    v_sub_co_u32_e32 v5, vcc, 0, v1
-; GCN-NEXT:    v_subb_co_u32_e32 v14, vcc, 0, v0, vcc
-; GCN-NEXT:    v_madmk_f32 v3, v4, 0x4f800000, v3
-; GCN-NEXT:    v_rcp_f32_e32 v3, v3
-; GCN-NEXT:    v_mul_f32_e32 v3, 0x5f7ffffc, v3
-; GCN-NEXT:    v_mul_f32_e32 v4, 0x2f800000, v3
-; GCN-NEXT:    v_trunc_f32_e32 v4, v4
-; GCN-NEXT:    v_madmk_f32 v3, v4, 0xcf800000, v3
-; GCN-NEXT:    v_cvt_u32_f32_e32 v4, v4
-; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
-; GCN-NEXT:    v_mul_lo_u32 v16, v5, v4
-; GCN-NEXT:    v_mul_hi_u32 v15, v5, v3
-; GCN-NEXT:    v_mul_lo_u32 v17, v14, v3
-; GCN-NEXT:    v_mul_lo_u32 v18, v5, v3
-; GCN-NEXT:    v_add_u32_e32 v15, v15, v16
-; GCN-NEXT:    v_add_u32_e32 v15, v15, v17
-; GCN-NEXT:    v_mul_lo_u32 v16, v3, v15
-; GCN-NEXT:    v_mul_hi_u32 v17, v3, v18
-; GCN-NEXT:    v_mul_hi_u32 v19, v3, v15
-; GCN-NEXT:    v_mul_hi_u32 v20, v4, v15
-; GCN-NEXT:    v_mul_lo_u32 v15, v4, v15
-; GCN-NEXT:    v_add_co_u32_e32 v16, vcc, v17, v16
-; GCN-NEXT:    v_addc_co_u32_e32 v17, vcc, 0, v19, vcc
-; GCN-NEXT:    v_mul_lo_u32 v19, v4, v18
-; GCN-NEXT:    v_mul_hi_u32 v18, v4, v18
-; GCN-NEXT:    v_add_co_u32_e32 v16, vcc, v16, v19
-; GCN-NEXT:    v_addc_co_u32_e32 v16, vcc, v17, v18, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v17, vcc, 0, v20, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v15, vcc, v16, v15
-; GCN-NEXT:    v_addc_co_u32_e32 v16, vcc, 0, v17, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v15
-; GCN-NEXT:    v_addc_co_u32_e32 v4, vcc, v4, v16, vcc
-; GCN-NEXT:    v_mul_lo_u32 v15, v5, v4
-; GCN-NEXT:    v_mul_hi_u32 v16, v5, v3
-; GCN-NEXT:    v_mul_lo_u32 v14, v14, v3
-; GCN-NEXT:    v_mul_lo_u32 v5, v5, v3
-; GCN-NEXT:    v_add_u32_e32 v15, v16, v15
-; GCN-NEXT:    v_add_u32_e32 v14, v15, v14
-; GCN-NEXT:    v_mul_lo_u32 v17, v3, v14
-; GCN-NEXT:    v_mul_hi_u32 v18, v3, v5
-; GCN-NEXT:    v_mul_hi_u32 v19, v3, v14
-; GCN-NEXT:    v_mul_hi_u32 v16, v4, v5
-; GCN-NEXT:    v_mul_lo_u32 v5, v4, v5
-; GCN-NEXT:    v_mul_hi_u32 v15, v4, v14
-; GCN-NEXT:    v_add_co_u32_e32 v17, vcc, v18, v17
-; GCN-NEXT:    v_addc_co_u32_e32 v18, vcc, 0, v19, vcc
-; GCN-NEXT:    v_mul_lo_u32 v14, v4, v14
-; GCN-NEXT:    v_add_co_u32_e32 v5, vcc, v17, v5
-; GCN-NEXT:    v_addc_co_u32_e32 v5, vcc, v18, v16, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v5, vcc, v5, v14
-; GCN-NEXT:    v_addc_co_u32_e32 v14, vcc, 0, v15, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_co_u32_e32 v4, vcc, v4, v14, vcc
-; GCN-NEXT:    v_ashrrev_i32_e32 v5, 31, v7
-; GCN-NEXT:    v_add_co_u32_e32 v14, vcc, v6, v5
-; GCN-NEXT:    v_xor_b32_e32 v14, v14, v5
-; GCN-NEXT:    v_mul_lo_u32 v15, v14, v4
-; GCN-NEXT:    v_mul_hi_u32 v16, v14, v3
-; GCN-NEXT:    v_mul_hi_u32 v17, v14, v4
-; GCN-NEXT:    v_addc_co_u32_e32 v7, vcc, v7, v5, vcc
-; GCN-NEXT:    v_xor_b32_e32 v7, v7, v5
-; GCN-NEXT:    v_add_co_u32_e32 v15, vcc, v16, v15
-; GCN-NEXT:    v_addc_co_u32_e32 v16, vcc, 0, v17, vcc
-; GCN-NEXT:    v_mul_lo_u32 v17, v7, v3
-; GCN-NEXT:    v_mul_hi_u32 v3, v7, v3
-; GCN-NEXT:    v_mul_hi_u32 v18, v7, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v7, v4
-; GCN-NEXT:    v_add_co_u32_e32 v15, vcc, v15, v17
-; GCN-NEXT:    v_addc_co_u32_e32 v3, vcc, v16, v3, vcc
-; GCN-NEXT:    v_addc_co_u32_e32 v15, vcc, 0, v18, vcc
-; GCN-NEXT:    v_add_co_u32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_co_u32_e32 v4, vcc, 0, v15, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v15, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v16, v0, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_add_u32_e32 v4, v15, v4
-; GCN-NEXT:    v_add_u32_e32 v4, v4, v16
-; GCN-NEXT:    v_sub_u32_e32 v15, v7, v4
-; GCN-NEXT:    v_sub_co_u32_e32 v3, vcc, v14, v3
-; GCN-NEXT:    v_subb_co_u32_e64 v14, s[0:1], v15, v0, vcc
-; GCN-NEXT:    v_sub_co_u32_e64 v15, s[0:1], v3, v1
-; GCN-NEXT:    v_subbrev_co_u32_e64 v16, s[2:3], 0, v14, s[0:1]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v16, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v17, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[2:3], v15, v1
-; GCN-NEXT:    v_subb_co_u32_e32 v4, vcc, v7, v4, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v18, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], v16, v0
-; GCN-NEXT:    v_subb_co_u32_e64 v14, s[0:1], v14, v0, s[0:1]
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v4, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v17, v17, v18, s[2:3]
-; GCN-NEXT:    v_sub_co_u32_e64 v18, s[0:1], v15, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, vcc
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v3, v1
-; GCN-NEXT:    v_subbrev_co_u32_e64 v14, s[0:1], 0, v14, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v4, v0
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v17
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v7, v1, vcc
-; GCN-NEXT:    v_cndmask_b32_e64 v15, v15, v18, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v14, v16, v14, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v15, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v14, vcc
-; GCN-NEXT:    v_xor_b32_e32 v1, v1, v5
-; GCN-NEXT:    v_xor_b32_e32 v0, v0, v5
-; GCN-NEXT:    v_sub_co_u32_e32 v14, vcc, v1, v5
-; GCN-NEXT:    v_subb_co_u32_e32 v15, vcc, v0, v5, vcc
-; GCN-NEXT:    s_cbranch_execnz .LBB12_12
+; GCN-NEXT:    s_ashr_i32 s12, s9, 31
+; GCN-NEXT:    s_add_u32 s14, s8, s12
+; GCN-NEXT:    s_mov_b32 s13, s12
+; GCN-NEXT:    s_addc_u32 s15, s9, s12
+; GCN-NEXT:    s_xor_b64 s[14:15], s[14:15], s[12:13]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s14
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s15
+; GCN-NEXT:    s_sub_u32 s9, 0, s14
+; GCN-NEXT:    s_subb_u32 s18, 0, s15
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; GCN-NEXT:    v_rcp_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s19, v1
+; GCN-NEXT:    v_readfirstlane_b32 s16, v0
+; GCN-NEXT:    s_mul_i32 s17, s9, s19
+; GCN-NEXT:    s_mul_hi_u32 s21, s9, s16
+; GCN-NEXT:    s_mul_i32 s20, s18, s16
+; GCN-NEXT:    s_add_i32 s17, s21, s17
+; GCN-NEXT:    s_add_i32 s17, s17, s20
+; GCN-NEXT:    s_mul_i32 s22, s9, s16
+; GCN-NEXT:    s_mul_i32 s21, s16, s17
+; GCN-NEXT:    s_mul_hi_u32 s23, s16, s22
+; GCN-NEXT:    s_mul_hi_u32 s20, s16, s17
+; GCN-NEXT:    s_add_u32 s21, s23, s21
+; GCN-NEXT:    s_addc_u32 s20, 0, s20
+; GCN-NEXT:    s_mul_hi_u32 s24, s19, s22
+; GCN-NEXT:    s_mul_i32 s22, s19, s22
+; GCN-NEXT:    s_add_u32 s21, s21, s22
+; GCN-NEXT:    s_mul_hi_u32 s23, s19, s17
+; GCN-NEXT:    s_addc_u32 s20, s20, s24
+; GCN-NEXT:    s_addc_u32 s21, s23, 0
+; GCN-NEXT:    s_mul_i32 s17, s19, s17
+; GCN-NEXT:    s_add_u32 s17, s20, s17
+; GCN-NEXT:    s_addc_u32 s20, 0, s21
+; GCN-NEXT:    s_add_i32 s21, s16, s17
+; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_addc_u32 s19, s19, s20
+; GCN-NEXT:    s_mul_i32 s16, s9, s19
+; GCN-NEXT:    s_mul_hi_u32 s17, s9, s21
+; GCN-NEXT:    s_add_i32 s16, s17, s16
+; GCN-NEXT:    s_mul_i32 s18, s18, s21
+; GCN-NEXT:    s_add_i32 s16, s16, s18
+; GCN-NEXT:    s_mul_i32 s9, s9, s21
+; GCN-NEXT:    s_mul_hi_u32 s18, s19, s9
+; GCN-NEXT:    s_mul_i32 s20, s19, s9
+; GCN-NEXT:    s_mul_i32 s23, s21, s16
+; GCN-NEXT:    s_mul_hi_u32 s9, s21, s9
+; GCN-NEXT:    s_mul_hi_u32 s22, s21, s16
+; GCN-NEXT:    s_add_u32 s9, s9, s23
+; GCN-NEXT:    s_addc_u32 s22, 0, s22
+; GCN-NEXT:    s_add_u32 s9, s9, s20
+; GCN-NEXT:    s_mul_hi_u32 s17, s19, s16
+; GCN-NEXT:    s_addc_u32 s9, s22, s18
+; GCN-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-NEXT:    s_mul_i32 s16, s19, s16
+; GCN-NEXT:    s_add_u32 s9, s9, s16
+; GCN-NEXT:    s_addc_u32 s18, 0, s17
+; GCN-NEXT:    s_add_i32 s21, s21, s9
+; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_addc_u32 s9, s19, s18
+; GCN-NEXT:    s_ashr_i32 s16, s11, 31
+; GCN-NEXT:    s_add_u32 s18, s10, s16
+; GCN-NEXT:    s_mov_b32 s17, s16
+; GCN-NEXT:    s_addc_u32 s19, s11, s16
+; GCN-NEXT:    s_xor_b64 s[18:19], s[18:19], s[16:17]
+; GCN-NEXT:    s_mul_i32 s20, s18, s9
+; GCN-NEXT:    s_mul_hi_u32 s22, s18, s21
+; GCN-NEXT:    s_mul_hi_u32 s11, s18, s9
+; GCN-NEXT:    s_add_u32 s20, s22, s20
+; GCN-NEXT:    s_addc_u32 s11, 0, s11
+; GCN-NEXT:    s_mul_hi_u32 s23, s19, s21
+; GCN-NEXT:    s_mul_i32 s21, s19, s21
+; GCN-NEXT:    s_add_u32 s20, s20, s21
+; GCN-NEXT:    s_mul_hi_u32 s22, s19, s9
+; GCN-NEXT:    s_addc_u32 s11, s11, s23
+; GCN-NEXT:    s_addc_u32 s20, s22, 0
+; GCN-NEXT:    s_mul_i32 s9, s19, s9
+; GCN-NEXT:    s_add_u32 s9, s11, s9
+; GCN-NEXT:    s_addc_u32 s11, 0, s20
+; GCN-NEXT:    s_mul_i32 s11, s14, s11
+; GCN-NEXT:    s_mul_hi_u32 s20, s14, s9
+; GCN-NEXT:    s_add_i32 s11, s20, s11
+; GCN-NEXT:    s_mul_i32 s20, s15, s9
+; GCN-NEXT:    s_add_i32 s11, s11, s20
+; GCN-NEXT:    s_sub_i32 s22, s19, s11
+; GCN-NEXT:    s_mul_i32 s9, s14, s9
+; GCN-NEXT:    s_sub_i32 s9, s18, s9
+; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_subb_u32 s18, s22, s15
+; GCN-NEXT:    s_sub_i32 s24, s9, s14
+; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
+; GCN-NEXT:    s_subb_u32 s25, s18, 0
+; GCN-NEXT:    s_cmp_ge_u32 s25, s15
+; GCN-NEXT:    s_cselect_b32 s26, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s24, s14
+; GCN-NEXT:    s_cselect_b32 s27, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s25, s15
+; GCN-NEXT:    s_cselect_b32 s26, s27, s26
+; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
+; GCN-NEXT:    s_subb_u32 s18, s18, s15
+; GCN-NEXT:    s_sub_i32 s27, s24, s14
+; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
+; GCN-NEXT:    s_subb_u32 s18, s18, 0
+; GCN-NEXT:    s_cmp_lg_u32 s26, 0
+; GCN-NEXT:    s_cselect_b32 s22, s27, s24
+; GCN-NEXT:    s_cselect_b32 s18, s18, s25
+; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_subb_u32 s11, s19, s11
+; GCN-NEXT:    s_cmp_ge_u32 s11, s15
+; GCN-NEXT:    s_cselect_b32 s19, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s9, s14
+; GCN-NEXT:    s_cselect_b32 s14, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s11, s15
+; GCN-NEXT:    s_cselect_b32 s14, s14, s19
+; GCN-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-NEXT:    s_cselect_b32 s15, s18, s11
+; GCN-NEXT:    s_cselect_b32 s14, s22, s9
+; GCN-NEXT:    s_xor_b64 s[14:15], s[14:15], s[16:17]
+; GCN-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-NEXT:    s_subb_u32 s15, s15, s16
+; GCN-NEXT:    s_cbranch_execnz .LBB12_13
 ; GCN-NEXT:  .LBB12_11:
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, v2
-; GCN-NEXT:    v_sub_u32_e32 v1, 0, v2
-; GCN-NEXT:    v_mov_b32_e32 v15, 0
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    s_sub_i32 s9, 0, s8
+; GCN-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v1, v1, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s9, v0
 ; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
 ; GCN-NEXT:    v_add_u32_e32 v0, v0, v1
-; GCN-NEXT:    v_mul_hi_u32 v0, v6, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, v0, v2
-; GCN-NEXT:    v_sub_u32_e32 v0, v6, v0
-; GCN-NEXT:    v_sub_u32_e32 v1, v0, v2
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
+; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s8
+; GCN-NEXT:    v_sub_u32_e32 v0, s10, v0
+; GCN-NEXT:    v_subrev_u32_e32 v1, s8, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; GCN-NEXT:    v_sub_u32_e32 v1, v0, v2
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v14, v0, v1, vcc
+; GCN-NEXT:    v_subrev_u32_e32 v1, s8, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v4, v0, v1, vcc
+; GCN-NEXT:    s_branch .LBB12_14
 ; GCN-NEXT:  .LBB12_12:
-; GCN-NEXT:    v_mov_b32_e32 v0, 0
-; GCN-NEXT:    global_store_dwordx4 v0, v[12:15], s[8:9] offset:16
-; GCN-NEXT:    global_store_dwordx4 v0, v[8:11], s[8:9]
-; GCN-NEXT:    s_endpgm
+; GCN-NEXT:    ; implicit-def: $sgpr14_sgpr15
+; GCN-NEXT:    s_branch .LBB12_11
 ; GCN-NEXT:  .LBB12_13:
-; GCN-NEXT:    ; implicit-def: $vgpr8_vgpr9
-; GCN-NEXT:    s_branch .LBB12_2
+; GCN-NEXT:    v_mov_b32_e32 v4, s14
+; GCN-NEXT:    v_mov_b32_e32 v5, s15
 ; GCN-NEXT:  .LBB12_14:
-; GCN-NEXT:    s_branch .LBB12_5
-; GCN-NEXT:  .LBB12_15:
-; GCN-NEXT:    ; implicit-def: $vgpr12_vgpr13
-; GCN-NEXT:    s_branch .LBB12_8
+; GCN-NEXT:    s_or_b64 s[8:9], s[4:5], s[2:3]
+; GCN-NEXT:    s_mov_b32 s8, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GCN-NEXT:    s_cbranch_scc0 .LBB12_17
+; GCN-NEXT:  ; %bb.15:
+; GCN-NEXT:    s_ashr_i32 s8, s3, 31
+; GCN-NEXT:    s_add_u32 s10, s2, s8
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_addc_u32 s11, s3, s8
+; GCN-NEXT:    s_xor_b64 s[10:11], s[10:11], s[8:9]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s11
+; GCN-NEXT:    s_sub_u32 s3, 0, s10
+; GCN-NEXT:    s_subb_u32 s14, 0, s11
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
+; GCN-NEXT:    v_rcp_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
+; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
+; GCN-NEXT:    v_trunc_f32_e32 v1, v1
+; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s15, v1
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_mul_i32 s13, s3, s15
+; GCN-NEXT:    s_mul_hi_u32 s17, s3, s12
+; GCN-NEXT:    s_mul_i32 s16, s14, s12
+; GCN-NEXT:    s_add_i32 s13, s17, s13
+; GCN-NEXT:    s_add_i32 s13, s13, s16
+; GCN-NEXT:    s_mul_i32 s18, s3, s12
+; GCN-NEXT:    s_mul_i32 s17, s12, s13
+; GCN-NEXT:    s_mul_hi_u32 s19, s12, s18
+; GCN-NEXT:    s_mul_hi_u32 s16, s12, s13
+; GCN-NEXT:    s_add_u32 s17, s19, s17
+; GCN-NEXT:    s_addc_u32 s16, 0, s16
+; GCN-NEXT:    s_mul_hi_u32 s20, s15, s18
+; GCN-NEXT:    s_mul_i32 s18, s15, s18
+; GCN-NEXT:    s_add_u32 s17, s17, s18
+; GCN-NEXT:    s_mul_hi_u32 s19, s15, s13
+; GCN-NEXT:    s_addc_u32 s16, s16, s20
+; GCN-NEXT:    s_addc_u32 s17, s19, 0
+; GCN-NEXT:    s_mul_i32 s13, s15, s13
+; GCN-NEXT:    s_add_u32 s13, s16, s13
+; GCN-NEXT:    s_addc_u32 s16, 0, s17
+; GCN-NEXT:    s_add_i32 s17, s12, s13
+; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GCN-NEXT:    s_addc_u32 s15, s15, s16
+; GCN-NEXT:    s_mul_i32 s12, s3, s15
+; GCN-NEXT:    s_mul_hi_u32 s13, s3, s17
+; GCN-NEXT:    s_add_i32 s12, s13, s12
+; GCN-NEXT:    s_mul_i32 s14, s14, s17
+; GCN-NEXT:    s_add_i32 s12, s12, s14
+; GCN-NEXT:    s_mul_i32 s3, s3, s17
+; GCN-NEXT:    s_mul_hi_u32 s14, s15, s3
+; GCN-NEXT:    s_mul_i32 s16, s15, s3
+; GCN-NEXT:    s_mul_i32 s19, s17, s12
+; GCN-NEXT:    s_mul_hi_u32 s3, s17, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s17, s12
+; GCN-NEXT:    s_add_u32 s3, s3, s19
+; GCN-NEXT:    s_addc_u32 s18, 0, s18
+; GCN-NEXT:    s_add_u32 s3, s3, s16
+; GCN-NEXT:    s_mul_hi_u32 s13, s15, s12
+; GCN-NEXT:    s_addc_u32 s3, s18, s14
+; GCN-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-NEXT:    s_mul_i32 s12, s15, s12
+; GCN-NEXT:    s_add_u32 s3, s3, s12
+; GCN-NEXT:    s_addc_u32 s14, 0, s13
+; GCN-NEXT:    s_add_i32 s17, s17, s3
+; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
+; GCN-NEXT:    s_addc_u32 s3, s15, s14
+; GCN-NEXT:    s_ashr_i32 s12, s5, 31
+; GCN-NEXT:    s_add_u32 s14, s4, s12
+; GCN-NEXT:    s_mov_b32 s13, s12
+; GCN-NEXT:    s_addc_u32 s15, s5, s12
+; GCN-NEXT:    s_xor_b64 s[14:15], s[14:15], s[12:13]
+; GCN-NEXT:    s_mul_i32 s16, s14, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s14, s17
+; GCN-NEXT:    s_mul_hi_u32 s5, s14, s3
+; GCN-NEXT:    s_add_u32 s16, s18, s16
+; GCN-NEXT:    s_addc_u32 s5, 0, s5
+; GCN-NEXT:    s_mul_hi_u32 s19, s15, s17
+; GCN-NEXT:    s_mul_i32 s17, s15, s17
+; GCN-NEXT:    s_add_u32 s16, s16, s17
+; GCN-NEXT:    s_mul_hi_u32 s18, s15, s3
+; GCN-NEXT:    s_addc_u32 s5, s5, s19
+; GCN-NEXT:    s_addc_u32 s16, s18, 0
+; GCN-NEXT:    s_mul_i32 s3, s15, s3
+; GCN-NEXT:    s_add_u32 s3, s5, s3
+; GCN-NEXT:    s_addc_u32 s5, 0, s16
+; GCN-NEXT:    s_mul_i32 s5, s10, s5
+; GCN-NEXT:    s_mul_hi_u32 s16, s10, s3
+; GCN-NEXT:    s_add_i32 s5, s16, s5
+; GCN-NEXT:    s_mul_i32 s16, s11, s3
+; GCN-NEXT:    s_add_i32 s5, s5, s16
+; GCN-NEXT:    s_sub_i32 s18, s15, s5
+; GCN-NEXT:    s_mul_i32 s3, s10, s3
+; GCN-NEXT:    s_sub_i32 s3, s14, s3
+; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_subb_u32 s14, s18, s11
+; GCN-NEXT:    s_sub_i32 s20, s3, s10
+; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s21, s14, 0
+; GCN-NEXT:    s_cmp_ge_u32 s21, s11
+; GCN-NEXT:    s_cselect_b32 s22, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s20, s10
+; GCN-NEXT:    s_cselect_b32 s23, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s21, s11
+; GCN-NEXT:    s_cselect_b32 s22, s23, s22
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s14, s14, s11
+; GCN-NEXT:    s_sub_i32 s23, s20, s10
+; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
+; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_subb_u32 s14, s14, 0
+; GCN-NEXT:    s_cmp_lg_u32 s22, 0
+; GCN-NEXT:    s_cselect_b32 s18, s23, s20
+; GCN-NEXT:    s_cselect_b32 s14, s14, s21
+; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_subb_u32 s5, s15, s5
+; GCN-NEXT:    s_cmp_ge_u32 s5, s11
+; GCN-NEXT:    s_cselect_b32 s15, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s3, s10
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s5, s11
+; GCN-NEXT:    s_cselect_b32 s10, s10, s15
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_cselect_b32 s11, s14, s5
+; GCN-NEXT:    s_cselect_b32 s10, s18, s3
+; GCN-NEXT:    s_xor_b64 s[10:11], s[10:11], s[12:13]
+; GCN-NEXT:    s_sub_u32 s10, s10, s12
+; GCN-NEXT:    s_subb_u32 s11, s11, s12
+; GCN-NEXT:    s_cbranch_execnz .LBB12_18
 ; GCN-NEXT:  .LBB12_16:
-; GCN-NEXT:    s_branch .LBB12_11
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    s_sub_i32 s3, 0, s2
+; GCN-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; GCN-NEXT:    v_mul_lo_u32 v1, s3, v0
+; GCN-NEXT:    v_mul_hi_u32 v1, v0, v1
+; GCN-NEXT:    v_add_u32_e32 v0, v0, v1
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GCN-NEXT:    v_mul_lo_u32 v0, v0, s2
+; GCN-NEXT:    v_sub_u32_e32 v0, s4, v0
+; GCN-NEXT:    v_subrev_u32_e32 v1, s2, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
+; GCN-NEXT:    v_subrev_u32_e32 v1, s2, v0
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v0
+; GCN-NEXT:    v_cndmask_b32_e32 v6, v0, v1, vcc
+; GCN-NEXT:    s_branch .LBB12_19
+; GCN-NEXT:  .LBB12_17:
+; GCN-NEXT:    ; implicit-def: $sgpr10_sgpr11
+; GCN-NEXT:    s_branch .LBB12_16
+; GCN-NEXT:  .LBB12_18:
+; GCN-NEXT:    v_mov_b32_e32 v6, s10
+; GCN-NEXT:    v_mov_b32_e32 v7, s11
+; GCN-NEXT:  .LBB12_19:
+; GCN-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mov_b32_e32 v1, s7
+; GCN-NEXT:    global_store_dwordx4 v8, v[4:7], s[0:1] offset:16
+; GCN-NEXT:    global_store_dwordx4 v8, v[0:3], s[0:1]
+; GCN-NEXT:    s_endpgm
 ;
 ; TAHITI-LABEL: srem_v4i64:
 ; TAHITI:       ; %bb.0:
@@ -5546,7 +5639,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    v_mul_lo_u32 v20, v20, v11
 ; TAHITI-NEXT:    v_mul_lo_u32 v19, v19, v11
 ; TAHITI-NEXT:    v_add_i32_e32 v21, vcc, v21, v22
-; TAHITI-NEXT:    v_add_i32_e32 v20, vcc, v21, v20
+; TAHITI-NEXT:    v_add_i32_e32 v20, vcc, v20, v21
 ; TAHITI-NEXT:    v_mul_lo_u32 v23, v11, v20
 ; TAHITI-NEXT:    v_mul_hi_u32 v24, v11, v19
 ; TAHITI-NEXT:    v_mul_hi_u32 v25, v11, v20
@@ -5689,7 +5782,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    v_mul_lo_u32 v18, v18, v13
 ; TAHITI-NEXT:    v_mul_lo_u32 v15, v15, v13
 ; TAHITI-NEXT:    v_add_i32_e32 v19, vcc, v19, v20
-; TAHITI-NEXT:    v_add_i32_e32 v18, vcc, v19, v18
+; TAHITI-NEXT:    v_add_i32_e32 v18, vcc, v18, v19
 ; TAHITI-NEXT:    v_mul_lo_u32 v21, v13, v18
 ; TAHITI-NEXT:    v_mul_hi_u32 v22, v13, v15
 ; TAHITI-NEXT:    v_mul_hi_u32 v23, v13, v18
@@ -5833,7 +5926,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    v_mul_lo_u32 v16, v16, v13
 ; TAHITI-NEXT:    v_mul_lo_u32 v15, v15, v13
 ; TAHITI-NEXT:    v_add_i32_e32 v17, vcc, v17, v18
-; TAHITI-NEXT:    v_add_i32_e32 v16, vcc, v17, v16
+; TAHITI-NEXT:    v_add_i32_e32 v16, vcc, v16, v17
 ; TAHITI-NEXT:    v_mul_lo_u32 v19, v13, v16
 ; TAHITI-NEXT:    v_mul_hi_u32 v20, v13, v15
 ; TAHITI-NEXT:    v_mul_hi_u32 v21, v13, v16
@@ -5976,7 +6069,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TAHITI-NEXT:    v_mul_lo_u32 v14, v14, v3
 ; TAHITI-NEXT:    v_mul_lo_u32 v5, v5, v3
 ; TAHITI-NEXT:    v_add_i32_e32 v15, vcc, v15, v16
-; TAHITI-NEXT:    v_add_i32_e32 v14, vcc, v15, v14
+; TAHITI-NEXT:    v_add_i32_e32 v14, vcc, v14, v15
 ; TAHITI-NEXT:    v_mul_lo_u32 v17, v3, v14
 ; TAHITI-NEXT:    v_mul_hi_u32 v18, v3, v5
 ; TAHITI-NEXT:    v_mul_hi_u32 v19, v3, v14
@@ -6089,7 +6182,6 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-LABEL: srem_v4i64:
 ; TONGA:       ; %bb.0:
 ; TONGA-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x24
-; TONGA-NEXT:    v_mov_b32_e32 v8, 0
 ; TONGA-NEXT:    s_waitcnt lgkmcnt(0)
 ; TONGA-NEXT:    s_add_u32 s0, s6, 48
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s6
@@ -6109,249 +6201,279 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_mov_b32_e32 v4, s0
 ; TONGA-NEXT:    flat_load_dwordx4 v[0:3], v[0:1]
 ; TONGA-NEXT:    flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT:    s_waitcnt vmcnt(3)
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v15
+; TONGA-NEXT:    v_readfirstlane_b32 s2, v14
 ; TONGA-NEXT:    s_waitcnt vmcnt(2)
-; TONGA-NEXT:    v_or_b32_e32 v9, v15, v11
-; TONGA-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; TONGA-NEXT:    s_cbranch_vccz .LBB12_13
+; TONGA-NEXT:    v_readfirstlane_b32 s1, v11
+; TONGA-NEXT:    v_readfirstlane_b32 s0, v10
+; TONGA-NEXT:    s_or_b64 s[6:7], s[2:3], s[0:1]
+; TONGA-NEXT:    s_mov_b32 s6, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; TONGA-NEXT:    s_cbranch_scc0 .LBB12_3
 ; TONGA-NEXT:  ; %bb.1:
-; TONGA-NEXT:    v_ashrrev_i32_e32 v8, 31, v11
-; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v10, v8
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, v11, v8, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v9, v9, v8
-; TONGA-NEXT:    v_xor_b32_e32 v8, v11, v8
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v11, v9
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v18, v8
-; TONGA-NEXT:    v_sub_u32_e32 v23, vcc, 0, v9
-; TONGA-NEXT:    v_subb_u32_e32 v24, vcc, 0, v8, vcc
-; TONGA-NEXT:    v_madmk_f32 v11, v18, 0x4f800000, v11
-; TONGA-NEXT:    v_rcp_f32_e32 v11, v11
-; TONGA-NEXT:    v_mul_f32_e32 v11, 0x5f7ffffc, v11
-; TONGA-NEXT:    v_mul_f32_e32 v18, 0x2f800000, v11
-; TONGA-NEXT:    v_trunc_f32_e32 v18, v18
-; TONGA-NEXT:    v_madmk_f32 v11, v18, 0xcf800000, v11
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v22, v18
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v11, v11
-; TONGA-NEXT:    v_mul_lo_u32 v20, v23, v22
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v23, v11, 0
-; TONGA-NEXT:    v_mul_lo_u32 v21, v24, v11
-; TONGA-NEXT:    v_add_u32_e32 v19, vcc, v19, v20
-; TONGA-NEXT:    v_add_u32_e32 v21, vcc, v19, v21
-; TONGA-NEXT:    v_mad_u64_u32 v[19:20], s[0:1], v11, v21, 0
-; TONGA-NEXT:    v_mul_hi_u32 v25, v11, v18
-; TONGA-NEXT:    v_add_u32_e32 v25, vcc, v25, v19
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v22, v18, 0
-; TONGA-NEXT:    v_addc_u32_e32 v26, vcc, 0, v20, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[20:21], s[0:1], v22, v21, 0
-; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v25, v18
-; TONGA-NEXT:    v_addc_u32_e32 v18, vcc, v26, v19, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v19, vcc, 0, v21, vcc
-; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v18, v20
-; TONGA-NEXT:    v_addc_u32_e32 v19, vcc, 0, v19, vcc
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v11, v18
-; TONGA-NEXT:    v_addc_u32_e32 v25, vcc, v22, v19, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v23, v11, 0
-; TONGA-NEXT:    v_mul_lo_u32 v22, v23, v25
-; TONGA-NEXT:    v_mul_lo_u32 v23, v24, v11
-; TONGA-NEXT:    v_mul_hi_u32 v24, v11, v18
-; TONGA-NEXT:    v_mad_u64_u32 v[20:21], s[0:1], v25, v18, 0
-; TONGA-NEXT:    v_add_u32_e32 v19, vcc, v22, v19
-; TONGA-NEXT:    v_add_u32_e32 v19, vcc, v19, v23
-; TONGA-NEXT:    v_mad_u64_u32 v[22:23], s[0:1], v11, v19, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v25, v19, 0
-; TONGA-NEXT:    v_add_u32_e32 v22, vcc, v24, v22
-; TONGA-NEXT:    v_addc_u32_e32 v23, vcc, 0, v23, vcc
-; TONGA-NEXT:    v_add_u32_e32 v20, vcc, v22, v20
-; TONGA-NEXT:    v_addc_u32_e32 v20, vcc, v23, v21, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v19, vcc, 0, v19, vcc
-; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v20, v18
-; TONGA-NEXT:    v_addc_u32_e32 v19, vcc, 0, v19, vcc
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v11, v18
-; TONGA-NEXT:    v_addc_u32_e32 v20, vcc, v25, v19, vcc
-; TONGA-NEXT:    v_ashrrev_i32_e32 v22, 31, v15
-; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v14, v22
-; TONGA-NEXT:    v_xor_b32_e32 v23, v18, v22
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v23, v20, 0
-; TONGA-NEXT:    v_mul_hi_u32 v21, v23, v11
-; TONGA-NEXT:    v_addc_u32_e32 v15, vcc, v15, v22, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v15, v15, v22
-; TONGA-NEXT:    v_add_u32_e32 v24, vcc, v21, v18
-; TONGA-NEXT:    v_addc_u32_e32 v25, vcc, 0, v19, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v15, v11, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[20:21], s[0:1], v15, v20, 0
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v24, v18
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, v25, v19, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v18, vcc, 0, v21, vcc
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v11, v20
-; TONGA-NEXT:    v_addc_u32_e32 v18, vcc, 0, v18, vcc
-; TONGA-NEXT:    v_mul_lo_u32 v20, v9, v18
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v9, v11, 0
-; TONGA-NEXT:    v_mul_lo_u32 v11, v8, v11
-; TONGA-NEXT:    v_add_u32_e32 v19, vcc, v20, v19
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v11, v19
-; TONGA-NEXT:    v_sub_u32_e32 v19, vcc, v15, v11
-; TONGA-NEXT:    v_sub_u32_e32 v18, vcc, v23, v18
-; TONGA-NEXT:    v_subb_u32_e64 v19, s[0:1], v19, v8, vcc
-; TONGA-NEXT:    v_sub_u32_e64 v20, s[0:1], v18, v9
-; TONGA-NEXT:    v_subbrev_u32_e64 v21, s[2:3], 0, v19, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v21, v8
-; TONGA-NEXT:    v_cndmask_b32_e64 v23, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v20, v9
-; TONGA-NEXT:    v_subb_u32_e32 v11, vcc, v15, v11, vcc
-; TONGA-NEXT:    v_cndmask_b32_e64 v24, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_eq_u32_e64 s[2:3], v21, v8
-; TONGA-NEXT:    v_subb_u32_e64 v19, s[0:1], v19, v8, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v11, v8
-; TONGA-NEXT:    v_cndmask_b32_e64 v23, v23, v24, s[2:3]
-; TONGA-NEXT:    v_sub_u32_e64 v24, s[0:1], v20, v9
-; TONGA-NEXT:    v_cndmask_b32_e64 v15, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v18, v9
-; TONGA-NEXT:    v_subbrev_u32_e64 v19, s[0:1], 0, v19, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e64 v9, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_eq_u32_e32 vcc, v11, v8
-; TONGA-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v23
-; TONGA-NEXT:    v_cndmask_b32_e32 v8, v15, v9, vcc
-; TONGA-NEXT:    v_cndmask_b32_e64 v20, v20, v24, s[0:1]
-; TONGA-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
-; TONGA-NEXT:    v_cndmask_b32_e64 v19, v21, v19, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e32 v9, v18, v20, vcc
-; TONGA-NEXT:    v_cndmask_b32_e32 v8, v11, v19, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v9, v9, v22
-; TONGA-NEXT:    v_xor_b32_e32 v11, v8, v22
-; TONGA-NEXT:    v_sub_u32_e32 v8, vcc, v9, v22
-; TONGA-NEXT:    v_subb_u32_e32 v9, vcc, v11, v22, vcc
-; TONGA-NEXT:    s_cbranch_execnz .LBB12_3
+; TONGA-NEXT:    s_ashr_i32 s6, s1, 31
+; TONGA-NEXT:    s_add_u32 s8, s0, s6
+; TONGA-NEXT:    s_mov_b32 s7, s6
+; TONGA-NEXT:    s_addc_u32 s9, s1, s6
+; TONGA-NEXT:    s_xor_b64 s[6:7], s[8:9], s[6:7]
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v8, s6
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v9, s7
+; TONGA-NEXT:    s_sub_u32 s1, 0, s6
+; TONGA-NEXT:    s_subb_u32 s10, 0, s7
+; TONGA-NEXT:    v_madmk_f32 v8, v9, 0x4f800000, v8
+; TONGA-NEXT:    v_rcp_f32_e32 v8, v8
+; TONGA-NEXT:    v_mul_f32_e32 v8, 0x5f7ffffc, v8
+; TONGA-NEXT:    v_mul_f32_e32 v9, 0x2f800000, v8
+; TONGA-NEXT:    v_trunc_f32_e32 v9, v9
+; TONGA-NEXT:    v_madmk_f32 v8, v9, 0xcf800000, v8
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v14, v9
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v15, v8
+; TONGA-NEXT:    v_mul_lo_u32 v10, s1, v14
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], s1, v15, 0
+; TONGA-NEXT:    v_mul_lo_u32 v11, s10, v15
+; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v9, v10
+; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v9, v11
+; TONGA-NEXT:    v_mul_hi_u32 v18, v15, v8
+; TONGA-NEXT:    v_mad_u64_u32 v[9:10], s[8:9], v15, v11, 0
+; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v18, v9
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], v14, v8, 0
+; TONGA-NEXT:    v_addc_u32_e32 v19, vcc, 0, v10, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v14, v11, 0
+; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v18, v8
+; TONGA-NEXT:    v_addc_u32_e32 v8, vcc, v19, v9, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v11, vcc
+; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v8, v10
+; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v15, v8
+; TONGA-NEXT:    v_addc_u32_e32 v19, vcc, v14, v9, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], s1, v18, 0
+; TONGA-NEXT:    v_mul_lo_u32 v14, s1, v19
+; TONGA-NEXT:    v_mul_lo_u32 v15, s10, v18
+; TONGA-NEXT:    v_mul_hi_u32 v20, v18, v8
+; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], v19, v8, 0
+; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v14, v9
+; TONGA-NEXT:    v_add_u32_e32 v9, vcc, v15, v9
+; TONGA-NEXT:    v_mad_u64_u32 v[14:15], s[8:9], v18, v9, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], v19, v9, 0
+; TONGA-NEXT:    v_add_u32_e32 v14, vcc, v20, v14
+; TONGA-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v14, v10
+; TONGA-NEXT:    v_addc_u32_e32 v10, vcc, v15, v11, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v10, v8
+; TONGA-NEXT:    s_ashr_i32 s10, s3, 31
+; TONGA-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; TONGA-NEXT:    s_add_u32 s8, s2, s10
+; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v18, v8
+; TONGA-NEXT:    s_mov_b32 s11, s10
+; TONGA-NEXT:    s_addc_u32 s9, s3, s10
+; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, v19, v9, vcc
+; TONGA-NEXT:    s_xor_b64 s[12:13], s[8:9], s[10:11]
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], s12, v11, 0
+; TONGA-NEXT:    v_mul_hi_u32 v14, s12, v10
+; TONGA-NEXT:    v_readfirstlane_b32 s1, v9
+; TONGA-NEXT:    v_readfirstlane_b32 s3, v8
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], s13, v11, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[8:9], s13, v10, 0
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v14
+; TONGA-NEXT:    s_add_u32 s3, s14, s3
+; TONGA-NEXT:    s_addc_u32 s1, 0, s1
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v10
+; TONGA-NEXT:    v_readfirstlane_b32 s9, v11
+; TONGA-NEXT:    s_add_u32 s3, s3, s14
+; TONGA-NEXT:    v_readfirstlane_b32 s8, v9
+; TONGA-NEXT:    s_addc_u32 s1, s1, s9
+; TONGA-NEXT:    s_addc_u32 s3, s8, 0
+; TONGA-NEXT:    v_readfirstlane_b32 s8, v8
+; TONGA-NEXT:    s_add_u32 s1, s1, s8
+; TONGA-NEXT:    v_mov_b32_e32 v8, s1
+; TONGA-NEXT:    v_mad_u64_u32 v[8:9], s[8:9], s6, v8, 0
+; TONGA-NEXT:    s_addc_u32 s3, 0, s3
+; TONGA-NEXT:    s_mul_i32 s3, s6, s3
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v9
+; TONGA-NEXT:    s_add_i32 s3, s14, s3
+; TONGA-NEXT:    s_mul_i32 s1, s7, s1
+; TONGA-NEXT:    s_add_i32 s3, s3, s1
+; TONGA-NEXT:    s_sub_i32 s1, s13, s3
+; TONGA-NEXT:    v_readfirstlane_b32 s14, v8
+; TONGA-NEXT:    s_sub_i32 s12, s12, s14
+; TONGA-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; TONGA-NEXT:    s_subb_u32 s1, s1, s7
+; TONGA-NEXT:    s_sub_i32 s18, s12, s6
+; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s19, s1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s19, s7
+; TONGA-NEXT:    s_cselect_b32 s20, -1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s18, s6
+; TONGA-NEXT:    s_cselect_b32 s21, -1, 0
+; TONGA-NEXT:    s_cmp_eq_u32 s19, s7
+; TONGA-NEXT:    s_cselect_b32 s20, s21, s20
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s1, s1, s7
+; TONGA-NEXT:    s_sub_i32 s21, s18, s6
+; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
+; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_subb_u32 s1, s1, 0
+; TONGA-NEXT:    s_cmp_lg_u32 s20, 0
+; TONGA-NEXT:    s_cselect_b32 s16, s21, s18
+; TONGA-NEXT:    s_cselect_b32 s1, s1, s19
+; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
+; TONGA-NEXT:    s_subb_u32 s3, s13, s3
+; TONGA-NEXT:    s_cmp_ge_u32 s3, s7
+; TONGA-NEXT:    s_cselect_b32 s13, -1, 0
+; TONGA-NEXT:    s_cmp_ge_u32 s12, s6
+; TONGA-NEXT:    s_cselect_b32 s6, -1, 0
+; TONGA-NEXT:    s_cmp_eq_u32 s3, s7
+; TONGA-NEXT:    s_cselect_b32 s6, s6, s13
+; TONGA-NEXT:    s_cmp_lg_u32 s6, 0
+; TONGA-NEXT:    s_cselect_b32 s7, s1, s3
+; TONGA-NEXT:    s_cselect_b32 s6, s16, s12
+; TONGA-NEXT:    s_xor_b64 s[6:7], s[6:7], s[10:11]
+; TONGA-NEXT:    s_sub_u32 s6, s6, s10
+; TONGA-NEXT:    s_subb_u32 s7, s7, s10
+; TONGA-NEXT:    s_cbranch_execnz .LBB12_4
 ; TONGA-NEXT:  .LBB12_2:
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v8, v10
-; TONGA-NEXT:    v_sub_u32_e32 v9, vcc, 0, v10
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v8, s0
+; TONGA-NEXT:    s_sub_i32 s1, 0, s0
 ; TONGA-NEXT:    v_rcp_iflag_f32_e32 v8, v8
 ; TONGA-NEXT:    v_mul_f32_e32 v8, 0x4f7ffffe, v8
 ; TONGA-NEXT:    v_cvt_u32_f32_e32 v8, v8
-; TONGA-NEXT:    v_mul_lo_u32 v9, v9, v8
+; TONGA-NEXT:    v_mul_lo_u32 v9, s1, v8
 ; TONGA-NEXT:    v_mul_hi_u32 v9, v8, v9
 ; TONGA-NEXT:    v_add_u32_e32 v8, vcc, v8, v9
-; TONGA-NEXT:    v_mul_hi_u32 v8, v14, v8
-; TONGA-NEXT:    v_mul_lo_u32 v8, v8, v10
-; TONGA-NEXT:    v_sub_u32_e32 v8, vcc, v14, v8
-; TONGA-NEXT:    v_subrev_u32_e32 v9, vcc, v10, v8
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v10
+; TONGA-NEXT:    v_mul_hi_u32 v8, s2, v8
+; TONGA-NEXT:    v_mul_lo_u32 v8, v8, s0
+; TONGA-NEXT:    v_sub_u32_e32 v8, vcc, s2, v8
+; TONGA-NEXT:    v_subrev_u32_e32 v9, vcc, s0, v8
+; TONGA-NEXT:    v_cmp_le_u32_e32 vcc, s0, v8
 ; TONGA-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
-; TONGA-NEXT:    v_sub_u32_e32 v9, vcc, v8, v10
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v10
+; TONGA-NEXT:    v_subrev_u32_e32 v9, vcc, s0, v8
+; TONGA-NEXT:    v_cmp_le_u32_e32 vcc, s0, v8
 ; TONGA-NEXT:    v_cndmask_b32_e32 v8, v8, v9, vcc
 ; TONGA-NEXT:    v_mov_b32_e32 v9, 0
+; TONGA-NEXT:    s_branch .LBB12_5
 ; TONGA-NEXT:  .LBB12_3:
+; TONGA-NEXT:    ; implicit-def: $sgpr6_sgpr7
+; TONGA-NEXT:    s_branch .LBB12_2
+; TONGA-NEXT:  .LBB12_4:
+; TONGA-NEXT:    v_mov_b32_e32 v9, s7
+; TONGA-NEXT:    v_mov_b32_e32 v8, s6
+; TONGA-NEXT:  .LBB12_5:
 ; TONGA-NEXT:    v_or_b32_e32 v11, v17, v13
 ; TONGA-NEXT:    v_mov_b32_e32 v10, 0
 ; TONGA-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
-; TONGA-NEXT:    s_cbranch_vccz .LBB12_14
-; TONGA-NEXT:  ; %bb.4:
+; TONGA-NEXT:    s_cbranch_vccz .LBB12_15
+; TONGA-NEXT:  ; %bb.6:
 ; TONGA-NEXT:    v_ashrrev_i32_e32 v10, 31, v13
 ; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v12, v10
 ; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, v13, v10, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v15, v11, v10
-; TONGA-NEXT:    v_xor_b32_e32 v20, v13, v10
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v10, v15
-; TONGA-NEXT:    v_cvt_f32_u32_e32 v11, v20
-; TONGA-NEXT:    v_sub_u32_e32 v21, vcc, 0, v15
-; TONGA-NEXT:    v_subb_u32_e32 v22, vcc, 0, v20, vcc
-; TONGA-NEXT:    v_madmk_f32 v10, v11, 0x4f800000, v10
-; TONGA-NEXT:    v_rcp_f32_e32 v10, v10
-; TONGA-NEXT:    v_mul_f32_e32 v10, 0x5f7ffffc, v10
-; TONGA-NEXT:    v_mul_f32_e32 v11, 0x2f800000, v10
-; TONGA-NEXT:    v_trunc_f32_e32 v11, v11
-; TONGA-NEXT:    v_madmk_f32 v10, v11, 0xcf800000, v10
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v18, v11
-; TONGA-NEXT:    v_cvt_u32_f32_e32 v19, v10
-; TONGA-NEXT:    v_mul_lo_u32 v13, v21, v18
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v21, v19, 0
-; TONGA-NEXT:    v_mul_lo_u32 v14, v22, v19
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v11, v13
-; TONGA-NEXT:    v_add_u32_e32 v23, vcc, v11, v14
-; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v19, v23, 0
-; TONGA-NEXT:    v_mul_hi_u32 v11, v19, v10
-; TONGA-NEXT:    v_add_u32_e32 v24, vcc, v11, v13
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v18, v10, 0
-; TONGA-NEXT:    v_addc_u32_e32 v25, vcc, 0, v14, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v18, v23, 0
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v24, v10
-; TONGA-NEXT:    v_addc_u32_e32 v10, vcc, v25, v11, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, 0, v14, vcc
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v10, v13
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
-; TONGA-NEXT:    v_add_u32_e32 v23, vcc, v19, v10
-; TONGA-NEXT:    v_addc_u32_e32 v24, vcc, v18, v11, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v21, v23, 0
-; TONGA-NEXT:    v_mul_lo_u32 v18, v21, v24
-; TONGA-NEXT:    v_mul_lo_u32 v19, v22, v23
-; TONGA-NEXT:    v_mul_hi_u32 v21, v23, v10
-; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v24, v10, 0
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v18, v11
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v11, v19
-; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v23, v11, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v24, v11, 0
-; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v21, v18
-; TONGA-NEXT:    v_addc_u32_e32 v19, vcc, 0, v19, vcc
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v18, v13
-; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, v19, v14, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v13, v10
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v23, v10
-; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, v24, v11, vcc
-; TONGA-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v16, v18
-; TONGA-NEXT:    v_xor_b32_e32 v19, v10, v18
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v19, v14, 0
-; TONGA-NEXT:    v_mul_hi_u32 v21, v19, v13
-; TONGA-NEXT:    v_addc_u32_e32 v17, vcc, v17, v18, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v17, v17, v18
-; TONGA-NEXT:    v_add_u32_e32 v21, vcc, v21, v10
-; TONGA-NEXT:    v_addc_u32_e32 v22, vcc, 0, v11, vcc
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v17, v13, 0
-; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v17, v14, 0
-; TONGA-NEXT:    v_add_u32_e32 v10, vcc, v21, v10
-; TONGA-NEXT:    v_addc_u32_e32 v10, vcc, v22, v11, vcc
-; TONGA-NEXT:    v_addc_u32_e32 v11, vcc, 0, v14, vcc
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v10, v13
-; TONGA-NEXT:    v_addc_u32_e32 v10, vcc, 0, v11, vcc
-; TONGA-NEXT:    v_mul_lo_u32 v14, v15, v10
-; TONGA-NEXT:    v_mad_u64_u32 v[10:11], s[0:1], v15, v13, 0
-; TONGA-NEXT:    v_mul_lo_u32 v13, v20, v13
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v14, v11
-; TONGA-NEXT:    v_add_u32_e32 v11, vcc, v13, v11
-; TONGA-NEXT:    v_sub_u32_e32 v13, vcc, v17, v11
-; TONGA-NEXT:    v_sub_u32_e32 v10, vcc, v19, v10
-; TONGA-NEXT:    v_subb_u32_e64 v13, s[0:1], v13, v20, vcc
-; TONGA-NEXT:    v_sub_u32_e64 v14, s[0:1], v10, v15
-; TONGA-NEXT:    v_subbrev_u32_e64 v19, s[2:3], 0, v13, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v19, v20
+; TONGA-NEXT:    v_xor_b32_e32 v11, v11, v10
+; TONGA-NEXT:    v_xor_b32_e32 v10, v13, v10
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v13, v11
+; TONGA-NEXT:    v_cvt_f32_u32_e32 v14, v10
+; TONGA-NEXT:    v_sub_u32_e32 v22, vcc, 0, v11
+; TONGA-NEXT:    v_subb_u32_e32 v23, vcc, 0, v10, vcc
+; TONGA-NEXT:    v_madmk_f32 v13, v14, 0x4f800000, v13
+; TONGA-NEXT:    v_rcp_f32_e32 v13, v13
+; TONGA-NEXT:    v_mul_f32_e32 v13, 0x5f7ffffc, v13
+; TONGA-NEXT:    v_mul_f32_e32 v14, 0x2f800000, v13
+; TONGA-NEXT:    v_trunc_f32_e32 v14, v14
+; TONGA-NEXT:    v_madmk_f32 v13, v14, 0xcf800000, v13
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v20, v14
+; TONGA-NEXT:    v_cvt_u32_f32_e32 v21, v13
+; TONGA-NEXT:    v_mul_lo_u32 v15, v22, v20
+; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v22, v21, 0
+; TONGA-NEXT:    v_mul_lo_u32 v18, v23, v21
+; TONGA-NEXT:    v_add_u32_e32 v14, vcc, v14, v15
+; TONGA-NEXT:    v_add_u32_e32 v18, vcc, v14, v18
+; TONGA-NEXT:    v_mad_u64_u32 v[14:15], s[0:1], v21, v18, 0
+; TONGA-NEXT:    v_mul_hi_u32 v19, v21, v13
+; TONGA-NEXT:    v_add_u32_e32 v24, vcc, v19, v14
+; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v20, v13, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v20, v18, 0
+; TONGA-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v24, v13
+; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, v15, v14, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, 0, v19, vcc
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v13, v18
+; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, 0, v14, vcc
+; TONGA-NEXT:    v_add_u32_e32 v24, vcc, v21, v13
+; TONGA-NEXT:    v_addc_u32_e32 v25, vcc, v20, v14, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v22, v24, 0
+; TONGA-NEXT:    v_mul_lo_u32 v15, v22, v25
+; TONGA-NEXT:    v_mul_lo_u32 v20, v23, v24
+; TONGA-NEXT:    v_mad_u64_u32 v[18:19], s[0:1], v25, v13, 0
+; TONGA-NEXT:    v_add_u32_e32 v14, vcc, v15, v14
+; TONGA-NEXT:    v_add_u32_e32 v20, vcc, v20, v14
+; TONGA-NEXT:    v_mad_u64_u32 v[14:15], s[0:1], v24, v20, 0
+; TONGA-NEXT:    v_mul_hi_u32 v13, v24, v13
+; TONGA-NEXT:    v_mad_u64_u32 v[20:21], s[0:1], v25, v20, 0
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v13, v14
+; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, 0, v15, vcc
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v13, v18
+; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, v14, v19, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, 0, v21, vcc
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v13, v20
+; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, 0, v14, vcc
+; TONGA-NEXT:    v_add_u32_e32 v15, vcc, v24, v13
+; TONGA-NEXT:    v_addc_u32_e32 v18, vcc, v25, v14, vcc
+; TONGA-NEXT:    v_ashrrev_i32_e32 v19, 31, v17
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v16, v19
+; TONGA-NEXT:    v_xor_b32_e32 v20, v13, v19
+; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v20, v18, 0
+; TONGA-NEXT:    v_mul_hi_u32 v21, v20, v15
+; TONGA-NEXT:    v_addc_u32_e32 v17, vcc, v17, v19, vcc
+; TONGA-NEXT:    v_xor_b32_e32 v22, v17, v19
+; TONGA-NEXT:    v_add_u32_e32 v21, vcc, v21, v13
+; TONGA-NEXT:    v_addc_u32_e32 v23, vcc, 0, v14, vcc
+; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v22, v15, 0
+; TONGA-NEXT:    v_mad_u64_u32 v[17:18], s[0:1], v22, v18, 0
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v21, v13
+; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, v23, v14, vcc
+; TONGA-NEXT:    v_addc_u32_e32 v14, vcc, 0, v18, vcc
+; TONGA-NEXT:    v_add_u32_e32 v15, vcc, v13, v17
+; TONGA-NEXT:    v_addc_u32_e32 v13, vcc, 0, v14, vcc
+; TONGA-NEXT:    v_mul_lo_u32 v17, v11, v13
+; TONGA-NEXT:    v_mad_u64_u32 v[13:14], s[0:1], v11, v15, 0
+; TONGA-NEXT:    v_mul_lo_u32 v15, v10, v15
+; TONGA-NEXT:    v_add_u32_e32 v14, vcc, v17, v14
+; TONGA-NEXT:    v_add_u32_e32 v14, vcc, v15, v14
+; TONGA-NEXT:    v_sub_u32_e32 v15, vcc, v22, v14
+; TONGA-NEXT:    v_sub_u32_e32 v13, vcc, v20, v13
+; TONGA-NEXT:    v_subb_u32_e64 v15, s[0:1], v15, v10, vcc
+; TONGA-NEXT:    v_sub_u32_e64 v17, s[0:1], v13, v11
+; TONGA-NEXT:    v_subbrev_u32_e64 v18, s[2:3], 0, v15, s[0:1]
+; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v18, v10
+; TONGA-NEXT:    v_cndmask_b32_e64 v20, 0, -1, s[2:3]
+; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v17, v11
 ; TONGA-NEXT:    v_cndmask_b32_e64 v21, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_ge_u32_e64 s[2:3], v14, v15
-; TONGA-NEXT:    v_subb_u32_e32 v11, vcc, v17, v11, vcc
-; TONGA-NEXT:    v_cndmask_b32_e64 v22, 0, -1, s[2:3]
-; TONGA-NEXT:    v_cmp_eq_u32_e64 s[2:3], v19, v20
-; TONGA-NEXT:    v_subb_u32_e64 v13, s[0:1], v13, v20, s[0:1]
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v11, v20
-; TONGA-NEXT:    v_cndmask_b32_e64 v21, v21, v22, s[2:3]
-; TONGA-NEXT:    v_sub_u32_e64 v22, s[0:1], v14, v15
-; TONGA-NEXT:    v_cndmask_b32_e64 v17, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v10, v15
-; TONGA-NEXT:    v_subbrev_u32_e64 v13, s[0:1], 0, v13, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e64 v15, 0, -1, vcc
-; TONGA-NEXT:    v_cmp_eq_u32_e32 vcc, v11, v20
-; TONGA-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v21
-; TONGA-NEXT:    v_cndmask_b32_e32 v15, v17, v15, vcc
-; TONGA-NEXT:    v_cndmask_b32_e64 v14, v14, v22, s[0:1]
-; TONGA-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v15
-; TONGA-NEXT:    v_cndmask_b32_e64 v13, v19, v13, s[0:1]
-; TONGA-NEXT:    v_cndmask_b32_e32 v10, v10, v14, vcc
-; TONGA-NEXT:    v_cndmask_b32_e32 v11, v11, v13, vcc
-; TONGA-NEXT:    v_xor_b32_e32 v10, v10, v18
-; TONGA-NEXT:    v_xor_b32_e32 v11, v11, v18
-; TONGA-NEXT:    v_sub_u32_e32 v10, vcc, v10, v18
-; TONGA-NEXT:    v_subb_u32_e32 v11, vcc, v11, v18, vcc
-; TONGA-NEXT:    s_cbranch_execnz .LBB12_6
-; TONGA-NEXT:  .LBB12_5:
+; TONGA-NEXT:    v_cmp_eq_u32_e64 s[2:3], v18, v10
+; TONGA-NEXT:    v_subb_u32_e64 v15, s[0:1], v15, v10, s[0:1]
+; TONGA-NEXT:    v_cndmask_b32_e64 v20, v20, v21, s[2:3]
+; TONGA-NEXT:    v_sub_u32_e64 v21, s[0:1], v17, v11
+; TONGA-NEXT:    v_subbrev_u32_e64 v15, s[0:1], 0, v15, s[0:1]
+; TONGA-NEXT:    v_subb_u32_e32 v14, vcc, v22, v14, vcc
+; TONGA-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v20
+; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v14, v10
+; TONGA-NEXT:    v_cndmask_b32_e64 v15, v18, v15, s[0:1]
+; TONGA-NEXT:    v_cndmask_b32_e64 v18, 0, -1, vcc
+; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v13, v11
+; TONGA-NEXT:    v_cndmask_b32_e64 v11, 0, -1, vcc
+; TONGA-NEXT:    v_cmp_eq_u32_e32 vcc, v14, v10
+; TONGA-NEXT:    v_cndmask_b32_e32 v10, v18, v11, vcc
+; TONGA-NEXT:    v_cndmask_b32_e64 v17, v17, v21, s[0:1]
+; TONGA-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v10
+; TONGA-NEXT:    v_cndmask_b32_e32 v11, v13, v17, vcc
+; TONGA-NEXT:    v_cndmask_b32_e32 v10, v14, v15, vcc
+; TONGA-NEXT:    v_xor_b32_e32 v11, v11, v19
+; TONGA-NEXT:    v_xor_b32_e32 v13, v10, v19
+; TONGA-NEXT:    v_sub_u32_e32 v10, vcc, v11, v19
+; TONGA-NEXT:    v_subb_u32_e32 v11, vcc, v13, v19, vcc
+; TONGA-NEXT:    s_cbranch_execnz .LBB12_8
+; TONGA-NEXT:  .LBB12_7:
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v10, v12
 ; TONGA-NEXT:    v_sub_u32_e32 v11, vcc, 0, v12
 ; TONGA-NEXT:    v_rcp_iflag_f32_e32 v10, v10
@@ -6370,13 +6492,13 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v10, v12
 ; TONGA-NEXT:    v_cndmask_b32_e32 v10, v10, v11, vcc
 ; TONGA-NEXT:    v_mov_b32_e32 v11, 0
-; TONGA-NEXT:  .LBB12_6:
+; TONGA-NEXT:  .LBB12_8:
 ; TONGA-NEXT:    s_waitcnt vmcnt(0)
 ; TONGA-NEXT:    v_or_b32_e32 v13, v5, v1
 ; TONGA-NEXT:    v_mov_b32_e32 v12, 0
 ; TONGA-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; TONGA-NEXT:    s_cbranch_vccz .LBB12_15
-; TONGA-NEXT:  ; %bb.7:
+; TONGA-NEXT:    s_cbranch_vccz .LBB12_16
+; TONGA-NEXT:  ; %bb.9:
 ; TONGA-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
 ; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v0, v12
 ; TONGA-NEXT:    v_addc_u32_e32 v1, vcc, v1, v12, vcc
@@ -6418,7 +6540,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_mul_hi_u32 v19, v21, v12
 ; TONGA-NEXT:    v_mad_u64_u32 v[14:15], s[0:1], v22, v12, 0
 ; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v16, v13
-; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v13, v17
+; TONGA-NEXT:    v_add_u32_e32 v13, vcc, v17, v13
 ; TONGA-NEXT:    v_mad_u64_u32 v[16:17], s[0:1], v21, v13, 0
 ; TONGA-NEXT:    v_mad_u64_u32 v[12:13], s[0:1], v22, v13, 0
 ; TONGA-NEXT:    v_add_u32_e32 v16, vcc, v19, v16
@@ -6482,8 +6604,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_xor_b32_e32 v1, v1, v16
 ; TONGA-NEXT:    v_sub_u32_e32 v12, vcc, v5, v16
 ; TONGA-NEXT:    v_subb_u32_e32 v13, vcc, v1, v16, vcc
-; TONGA-NEXT:    s_cbranch_execnz .LBB12_9
-; TONGA-NEXT:  .LBB12_8:
+; TONGA-NEXT:    s_cbranch_execnz .LBB12_11
+; TONGA-NEXT:  .LBB12_10:
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v1, v0
 ; TONGA-NEXT:    v_sub_u32_e32 v5, vcc, 0, v0
 ; TONGA-NEXT:    v_mov_b32_e32 v13, 0
@@ -6502,12 +6624,12 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_subrev_u32_e32 v4, vcc, v0, v1
 ; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v1, v0
 ; TONGA-NEXT:    v_cndmask_b32_e32 v12, v1, v4, vcc
-; TONGA-NEXT:  .LBB12_9:
+; TONGA-NEXT:  .LBB12_11:
 ; TONGA-NEXT:    v_or_b32_e32 v1, v7, v3
 ; TONGA-NEXT:    v_mov_b32_e32 v0, 0
 ; TONGA-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; TONGA-NEXT:    s_cbranch_vccz .LBB12_16
-; TONGA-NEXT:  ; %bb.10:
+; TONGA-NEXT:    s_cbranch_vccz .LBB12_17
+; TONGA-NEXT:  ; %bb.12:
 ; TONGA-NEXT:    v_ashrrev_i32_e32 v0, 31, v3
 ; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v2, v0
 ; TONGA-NEXT:    v_addc_u32_e32 v3, vcc, v3, v0, vcc
@@ -6549,7 +6671,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_mul_hi_u32 v17, v19, v0
 ; TONGA-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v20, v0, 0
 ; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v14, v1
-; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v1, v15
+; TONGA-NEXT:    v_add_u32_e32 v1, vcc, v15, v1
 ; TONGA-NEXT:    v_mad_u64_u32 v[14:15], s[0:1], v19, v1, 0
 ; TONGA-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v20, v1, 0
 ; TONGA-NEXT:    v_add_u32_e32 v14, vcc, v17, v14
@@ -6613,8 +6735,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_xor_b32_e32 v1, v1, v15
 ; TONGA-NEXT:    v_sub_u32_e32 v14, vcc, v0, v15
 ; TONGA-NEXT:    v_subb_u32_e32 v15, vcc, v1, v15, vcc
-; TONGA-NEXT:    s_cbranch_execnz .LBB12_12
-; TONGA-NEXT:  .LBB12_11:
+; TONGA-NEXT:    s_cbranch_execnz .LBB12_14
+; TONGA-NEXT:  .LBB12_13:
 ; TONGA-NEXT:    v_cvt_f32_u32_e32 v0, v2
 ; TONGA-NEXT:    v_sub_u32_e32 v1, vcc, 0, v2
 ; TONGA-NEXT:    v_mov_b32_e32 v15, 0
@@ -6633,7 +6755,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_subrev_u32_e32 v1, vcc, v2, v0
 ; TONGA-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v2
 ; TONGA-NEXT:    v_cndmask_b32_e32 v14, v0, v1, vcc
-; TONGA-NEXT:  .LBB12_12:
+; TONGA-NEXT:  .LBB12_14:
 ; TONGA-NEXT:    v_mov_b32_e32 v0, s4
 ; TONGA-NEXT:    v_mov_b32_e32 v1, s5
 ; TONGA-NEXT:    s_add_u32 s0, s4, 16
@@ -6643,16 +6765,13 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_mov_b32_e32 v1, s1
 ; TONGA-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
 ; TONGA-NEXT:    s_endpgm
-; TONGA-NEXT:  .LBB12_13:
-; TONGA-NEXT:    ; implicit-def: $vgpr8_vgpr9
-; TONGA-NEXT:    s_branch .LBB12_2
-; TONGA-NEXT:  .LBB12_14:
-; TONGA-NEXT:    s_branch .LBB12_5
 ; TONGA-NEXT:  .LBB12_15:
-; TONGA-NEXT:    ; implicit-def: $vgpr12_vgpr13
-; TONGA-NEXT:    s_branch .LBB12_8
+; TONGA-NEXT:    s_branch .LBB12_7
 ; TONGA-NEXT:  .LBB12_16:
-; TONGA-NEXT:    s_branch .LBB12_11
+; TONGA-NEXT:    ; implicit-def: $vgpr12_vgpr13
+; TONGA-NEXT:    s_branch .LBB12_10
+; TONGA-NEXT:  .LBB12_17:
+; TONGA-NEXT:    s_branch .LBB12_13
 ;
 ; EG-LABEL: srem_v4i64:
 ; EG:       ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 5db2916bff36a..03bb85ec0e91a 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -5,119 +5,159 @@
 define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_srem:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0xd
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x9
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s12
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s13
-; GCN-NEXT:    s_sub_u32 s0, 0, s12
-; GCN-NEXT:    s_subb_u32 s1, 0, s13
-; GCN-NEXT:    s_mov_b32 s4, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
+; GCN-NEXT:    s_sub_u32 s10, 0, s8
+; GCN-NEXT:    s_subb_u32 s11, 0, s9
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s1, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s0, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s0, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s11, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s11, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s12, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s12, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, s13, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, s12, v0
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s11, v1
-; GCN-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s10, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v4
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v4, s11
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v2, s10, v0
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s1, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_mul_i32 s13, s11, s0
+; GCN-NEXT:    s_mul_i32 s14, s10, s0
+; GCN-NEXT:    s_add_i32 s1, s15, s1
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s14
+; GCN-NEXT:    s_add_i32 s1, s1, s13
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s14
+; GCN-NEXT:    v_readfirstlane_b32 s13, v3
+; GCN-NEXT:    s_mul_i32 s15, s0, s1
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s1
+; GCN-NEXT:    s_add_u32 s13, s13, s15
+; GCN-NEXT:    v_readfirstlane_b32 s15, v0
+; GCN-NEXT:    s_mul_i32 s14, s12, s14
+; GCN-NEXT:    s_addc_u32 s15, 0, s15
+; GCN-NEXT:    v_readfirstlane_b32 s16, v4
+; GCN-NEXT:    s_add_u32 s13, s13, s14
+; GCN-NEXT:    s_addc_u32 s13, s15, s16
+; GCN-NEXT:    v_readfirstlane_b32 s14, v1
+; GCN-NEXT:    s_addc_u32 s14, s14, 0
+; GCN-NEXT:    s_mul_i32 s1, s12, s1
+; GCN-NEXT:    s_add_u32 s1, s13, s1
+; GCN-NEXT:    s_addc_u32 s13, 0, s14
+; GCN-NEXT:    s_add_i32 s14, s0, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_addc_u32 s12, s12, s13
+; GCN-NEXT:    s_mul_i32 s0, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_add_i32 s0, s1, s0
+; GCN-NEXT:    s_mul_i32 s11, s11, s14
+; GCN-NEXT:    s_mul_i32 s1, s10, s14
+; GCN-NEXT:    s_add_i32 s0, s0, s11
+; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mul_hi_u32 v3, s12, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s14, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s12, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s14, v0
+; GCN-NEXT:    s_mul_i32 s11, s14, s0
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_add_u32 s11, s15, s11
+; GCN-NEXT:    v_readfirstlane_b32 s13, v0
+; GCN-NEXT:    s_mul_i32 s1, s12, s1
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    v_readfirstlane_b32 s10, v3
+; GCN-NEXT:    s_add_u32 s1, s11, s1
+; GCN-NEXT:    s_addc_u32 s1, s13, s10
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    s_addc_u32 s10, s10, 0
+; GCN-NEXT:    s_mul_i32 s0, s12, s0
+; GCN-NEXT:    s_add_u32 s0, s1, s0
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    s_add_i32 s11, s14, s0
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_addc_u32 s1, s12, s10
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GCN-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v1, s7, v2
+; GCN-NEXT:    s_mul_i32 s4, s6, s1
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GCN-NEXT:    s_add_u32 s4, s12, s4
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    s_mul_i32 s11, s7, s11
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    s_add_u32 s4, s4, s11
+; GCN-NEXT:    s_addc_u32 s4, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_addc_u32 s10, s10, 0
+; GCN-NEXT:    s_mul_i32 s1, s7, s1
+; GCN-NEXT:    s_add_u32 s4, s4, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_addc_u32 s5, 0, s10
+; GCN-NEXT:    s_mul_i32 s5, s8, s5
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_add_i32 s5, s10, s5
+; GCN-NEXT:    s_mul_i32 s10, s9, s4
+; GCN-NEXT:    s_add_i32 s10, s5, s10
+; GCN-NEXT:    s_sub_i32 s11, s7, s10
+; GCN-NEXT:    s_mul_i32 s4, s8, s4
+; GCN-NEXT:    s_sub_i32 s6, s6, s4
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s12, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_subb_u32 s11, s11, s9
+; GCN-NEXT:    s_sub_i32 s13, s6, s8
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_subb_u32 s14, s11, 0
+; GCN-NEXT:    s_cmp_ge_u32 s14, s9
+; GCN-NEXT:    s_cselect_b32 s5, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s8
+; GCN-NEXT:    s_cselect_b32 s15, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s14, s9
+; GCN-NEXT:    s_cselect_b32 s15, s15, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_subb_u32 s11, s11, s9
+; GCN-NEXT:    s_sub_i32 s16, s13, s8
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_subb_u32 s4, s11, 0
+; GCN-NEXT:    s_cmp_lg_u32 s15, 0
+; GCN-NEXT:    s_cselect_b32 s5, s16, s13
+; GCN-NEXT:    s_cselect_b32 s4, s4, s14
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_subb_u32 s7, s7, s10
+; GCN-NEXT:    s_cmp_ge_u32 s7, s9
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s6, s8
+; GCN-NEXT:    s_cselect_b32 s8, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s7, s9
+; GCN-NEXT:    s_cselect_b32 s8, s8, s10
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_cselect_b32 s4, s4, s7
+; GCN-NEXT:    s_cselect_b32 s5, s5, s6
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem:
@@ -920,133 +960,169 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0xd
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    s_ashr_i64 s[2:3], s[2:3], 31
-; GCN-NEXT:    s_ashr_i64 s[4:5], s[4:5], 31
-; GCN-NEXT:    s_ashr_i32 s6, s5, 31
-; GCN-NEXT:    s_add_u32 s4, s4, s6
-; GCN-NEXT:    s_mov_b32 s7, s6
-; GCN-NEXT:    s_addc_u32 s5, s5, s6
-; GCN-NEXT:    s_xor_b64 s[8:9], s[4:5], s[6:7]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GCN-NEXT:    s_sub_u32 s4, 0, s8
-; GCN-NEXT:    s_subb_u32 s5, 0, s9
-; GCN-NEXT:    s_ashr_i32 s10, s3, 31
+; GCN-NEXT:    s_ashr_i64 s[6:7], s[2:3], 31
+; GCN-NEXT:    s_ashr_i64 s[2:3], s[4:5], 31
+; GCN-NEXT:    s_ashr_i32 s4, s3, 31
+; GCN-NEXT:    s_add_u32 s2, s2, s4
+; GCN-NEXT:    s_mov_b32 s5, s4
+; GCN-NEXT:    s_addc_u32 s3, s3, s4
+; GCN-NEXT:    s_xor_b64 s[4:5], s[2:3], s[4:5]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-NEXT:    s_sub_u32 s10, 0, s4
+; GCN-NEXT:    s_subb_u32 s11, 0, s5
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_add_u32 s2, s2, s10
-; GCN-NEXT:    s_mov_b32 s11, s10
-; GCN-NEXT:    s_addc_u32 s3, s3, s10
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    s_xor_b64 s[12:13], s[2:3], s[10:11]
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s4, v0
-; GCN-NEXT:    s_mov_b32 s6, -1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v4
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_hi_u32 v7, v1, v2
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v4, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s12, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s12, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s12, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s13, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s13, v1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s13, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s13, v0
-; GCN-NEXT:    s_mov_b32 s4, s0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s8, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s8, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, s9, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, s8, v0
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s13, v1
-; GCN-NEXT:    v_mov_b32_e32 v3, s9
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s12, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s8, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v4
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s9, v5
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s8, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v4, s13
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_xor_b32_e32 v0, s10, v0
-; GCN-NEXT:    v_xor_b32_e32 v1, s10, v1
-; GCN-NEXT:    v_mov_b32_e32 v2, s10
-; GCN-NEXT:    v_subrev_i32_e32 v0, vcc, s10, v0
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v2, s10, v0
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    v_readfirstlane_b32 s8, v0
+; GCN-NEXT:    s_mul_i32 s9, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_mul_i32 s13, s11, s8
+; GCN-NEXT:    s_mul_i32 s14, s10, s8
+; GCN-NEXT:    s_add_i32 s9, s15, s9
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s14
+; GCN-NEXT:    s_add_i32 s9, s9, s13
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s9
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s14
+; GCN-NEXT:    v_readfirstlane_b32 s13, v3
+; GCN-NEXT:    s_mul_i32 s15, s8, s9
+; GCN-NEXT:    s_add_u32 s13, s13, s15
+; GCN-NEXT:    v_readfirstlane_b32 s15, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, v1, s9
+; GCN-NEXT:    s_addc_u32 s15, 0, s15
+; GCN-NEXT:    s_mul_i32 s14, s12, s14
+; GCN-NEXT:    v_readfirstlane_b32 s16, v4
+; GCN-NEXT:    s_add_u32 s13, s13, s14
+; GCN-NEXT:    s_addc_u32 s13, s15, s16
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_addc_u32 s14, s14, 0
+; GCN-NEXT:    s_mul_i32 s9, s12, s9
+; GCN-NEXT:    s_add_u32 s9, s13, s9
+; GCN-NEXT:    s_addc_u32 s13, 0, s14
+; GCN-NEXT:    s_add_i32 s14, s8, s9
+; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT:    s_or_b32 s8, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_addc_u32 s12, s12, s13
+; GCN-NEXT:    s_mul_i32 s8, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s9, v0
+; GCN-NEXT:    s_add_i32 s8, s9, s8
+; GCN-NEXT:    s_mul_i32 s11, s11, s14
+; GCN-NEXT:    s_mul_i32 s9, s10, s14
+; GCN-NEXT:    s_add_i32 s8, s8, s11
+; GCN-NEXT:    v_mov_b32_e32 v2, s9
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v3, s12, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s14, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s12, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s14, v0
+; GCN-NEXT:    s_mul_i32 s11, s14, s8
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_add_u32 s11, s15, s11
+; GCN-NEXT:    v_readfirstlane_b32 s13, v0
+; GCN-NEXT:    s_mul_i32 s9, s12, s9
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    v_readfirstlane_b32 s10, v3
+; GCN-NEXT:    s_add_u32 s9, s11, s9
+; GCN-NEXT:    s_addc_u32 s9, s13, s10
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    s_addc_u32 s10, s10, 0
+; GCN-NEXT:    s_mul_i32 s8, s12, s8
+; GCN-NEXT:    s_add_u32 s8, s9, s8
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    s_add_i32 s11, s14, s8
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    s_or_b32 s8, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_addc_u32 s10, s12, s10
+; GCN-NEXT:    s_ashr_i32 s8, s7, 31
+; GCN-NEXT:    s_add_u32 s6, s6, s8
+; GCN-NEXT:    s_mov_b32 s9, s8
+; GCN-NEXT:    s_addc_u32 s7, s7, s8
+; GCN-NEXT:    s_xor_b64 s[6:7], s[6:7], s[8:9]
+; GCN-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GCN-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GCN-NEXT:    s_mul_i32 s12, s6, s10
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    v_mul_hi_u32 v1, s7, v2
+; GCN-NEXT:    v_readfirstlane_b32 s14, v3
+; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GCN-NEXT:    s_add_u32 s12, s14, s12
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    s_mul_i32 s11, s7, s11
+; GCN-NEXT:    v_readfirstlane_b32 s14, v1
+; GCN-NEXT:    s_add_u32 s11, s12, s11
+; GCN-NEXT:    s_addc_u32 s11, s13, s14
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_addc_u32 s12, s12, 0
+; GCN-NEXT:    s_mul_i32 s10, s7, s10
+; GCN-NEXT:    s_add_u32 s10, s11, s10
+; GCN-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GCN-NEXT:    s_addc_u32 s11, 0, s12
+; GCN-NEXT:    s_mul_i32 s11, s4, s11
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_add_i32 s11, s12, s11
+; GCN-NEXT:    s_mul_i32 s12, s5, s10
+; GCN-NEXT:    s_add_i32 s12, s11, s12
+; GCN-NEXT:    s_sub_i32 s13, s7, s12
+; GCN-NEXT:    s_mul_i32 s10, s4, s10
+; GCN-NEXT:    s_sub_i32 s6, s6, s10
+; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_or_b32 s14, s10, s11
+; GCN-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-NEXT:    s_subb_u32 s13, s13, s5
+; GCN-NEXT:    s_sub_i32 s15, s6, s4
+; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_or_b32 s10, s10, s11
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_subb_u32 s16, s13, 0
+; GCN-NEXT:    s_cmp_ge_u32 s16, s5
+; GCN-NEXT:    s_cselect_b32 s11, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s15, s4
+; GCN-NEXT:    s_cselect_b32 s17, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s16, s5
+; GCN-NEXT:    s_cselect_b32 s17, s17, s11
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_subb_u32 s13, s13, s5
+; GCN-NEXT:    s_sub_i32 s18, s15, s4
+; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_or_b32 s10, s10, s11
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_subb_u32 s10, s13, 0
+; GCN-NEXT:    s_cmp_lg_u32 s17, 0
+; GCN-NEXT:    s_cselect_b32 s11, s18, s15
+; GCN-NEXT:    s_cselect_b32 s10, s10, s16
+; GCN-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-NEXT:    s_subb_u32 s7, s7, s12
+; GCN-NEXT:    s_cmp_ge_u32 s7, s5
+; GCN-NEXT:    s_cselect_b32 s12, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s6, s4
+; GCN-NEXT:    s_cselect_b32 s4, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s7, s5
+; GCN-NEXT:    s_cselect_b32 s4, s4, s12
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_cselect_b32 s5, s10, s7
+; GCN-NEXT:    s_cselect_b32 s4, s11, s6
+; GCN-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
+; GCN-NEXT:    s_sub_u32 s4, s4, s8
+; GCN-NEXT:    s_subb_u32 s5, s5, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem33_64:
@@ -1235,110 +1311,145 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-LABEL: s_test_srem_k_num_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i32 s4, s3, 31
 ; GCN-NEXT:    s_add_u32 s2, s2, s4
 ; GCN-NEXT:    s_mov_b32 s5, s4
 ; GCN-NEXT:    s_addc_u32 s3, s3, s4
-; GCN-NEXT:    s_xor_b64 s[8:9], s[2:3], s[4:5]
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GCN-NEXT:    s_sub_u32 s2, 0, s8
-; GCN-NEXT:    s_subb_u32 s3, 0, s9
-; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    s_xor_b64 s[4:5], s[2:3], s[4:5]
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
+; GCN-NEXT:    s_sub_u32 s2, 0, s4
+; GCN-NEXT:    s_subb_u32 s8, 0, s5
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s3, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s2, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s3, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, 24
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
-; GCN-NEXT:    v_mov_b32_e32 v3, s9
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s9, v0
-; GCN-NEXT:    v_mul_hi_u32 v2, s8, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, s8, v0
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0, v1
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s8, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s9, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s8, v4
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s9, v5
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s8, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v2, s2, v0
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    v_readfirstlane_b32 s6, v0
+; GCN-NEXT:    s_mul_i32 s7, s2, s9
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_mul_i32 s10, s8, s6
+; GCN-NEXT:    s_mul_i32 s11, s2, s6
+; GCN-NEXT:    s_add_i32 s7, s12, s7
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s11
+; GCN-NEXT:    s_add_i32 s7, s7, s10
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s7
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s11
+; GCN-NEXT:    v_readfirstlane_b32 s10, v3
+; GCN-NEXT:    s_mul_i32 s13, s6, s7
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s7
+; GCN-NEXT:    s_add_u32 s10, s10, s13
+; GCN-NEXT:    v_readfirstlane_b32 s13, v0
+; GCN-NEXT:    s_mul_i32 s11, s9, s11
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    v_readfirstlane_b32 s12, v4
+; GCN-NEXT:    s_add_u32 s10, s10, s11
+; GCN-NEXT:    s_addc_u32 s10, s13, s12
+; GCN-NEXT:    v_readfirstlane_b32 s11, v1
+; GCN-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-NEXT:    s_mul_i32 s7, s9, s7
+; GCN-NEXT:    s_add_u32 s7, s10, s7
+; GCN-NEXT:    s_addc_u32 s10, 0, s11
+; GCN-NEXT:    s_add_i32 s11, s6, s7
+; GCN-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    s_or_b32 s6, s6, s7
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_addc_u32 s9, s9, s10
+; GCN-NEXT:    s_mul_i32 s6, s2, s9
+; GCN-NEXT:    v_readfirstlane_b32 s7, v0
+; GCN-NEXT:    s_add_i32 s6, s7, s6
+; GCN-NEXT:    s_mul_i32 s8, s8, s11
+; GCN-NEXT:    s_mul_i32 s2, s2, s11
+; GCN-NEXT:    s_add_i32 s6, s6, s8
+; GCN-NEXT:    v_mov_b32_e32 v2, s2
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mul_hi_u32 v3, s9, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s9, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    s_mul_i32 s8, s11, s6
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_add_u32 s8, s12, s8
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_mul_i32 s2, s9, s2
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    v_readfirstlane_b32 s7, v3
+; GCN-NEXT:    s_add_u32 s2, s8, s2
+; GCN-NEXT:    s_addc_u32 s2, s10, s7
+; GCN-NEXT:    v_readfirstlane_b32 s7, v1
+; GCN-NEXT:    s_addc_u32 s7, s7, 0
+; GCN-NEXT:    s_mul_i32 s6, s9, s6
+; GCN-NEXT:    s_add_u32 s2, s2, s6
+; GCN-NEXT:    s_addc_u32 s8, 0, s7
+; GCN-NEXT:    s_add_i32 s11, s11, s2
+; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GCN-NEXT:    s_or_b32 s2, s6, s7
+; GCN-NEXT:    s_cmp_lg_u32 s2, 0
+; GCN-NEXT:    s_addc_u32 s6, s9, s8
+; GCN-NEXT:    v_mul_hi_u32 v1, s11, 24
+; GCN-NEXT:    v_mul_hi_u32 v0, s6, 24
+; GCN-NEXT:    s_mul_i32 s6, s6, 24
+; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    v_readfirstlane_b32 s8, v1
+; GCN-NEXT:    v_readfirstlane_b32 s7, v0
+; GCN-NEXT:    s_add_u32 s6, s8, s6
+; GCN-NEXT:    s_addc_u32 s6, 0, s7
+; GCN-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GCN-NEXT:    s_mul_i32 s7, s5, s6
+; GCN-NEXT:    s_mul_i32 s6, s4, s6
+; GCN-NEXT:    v_readfirstlane_b32 s8, v0
+; GCN-NEXT:    s_add_i32 s8, s8, s7
+; GCN-NEXT:    s_sub_i32 s9, 0, s8
+; GCN-NEXT:    s_sub_i32 s10, 24, s6
+; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GCN-NEXT:    s_or_b32 s11, s6, s7
+; GCN-NEXT:    s_cmp_lg_u32 s11, 0
+; GCN-NEXT:    s_subb_u32 s9, s9, s5
+; GCN-NEXT:    s_sub_i32 s12, s10, s4
+; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GCN-NEXT:    s_or_b32 s6, s6, s7
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_subb_u32 s13, s9, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s5
+; GCN-NEXT:    s_cselect_b32 s7, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s12, s4
+; GCN-NEXT:    s_cselect_b32 s14, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s13, s5
+; GCN-NEXT:    s_cselect_b32 s14, s14, s7
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_subb_u32 s9, s9, s5
+; GCN-NEXT:    s_sub_i32 s15, s12, s4
+; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GCN-NEXT:    s_or_b32 s6, s6, s7
+; GCN-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-NEXT:    s_subb_u32 s6, s9, 0
+; GCN-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-NEXT:    s_cselect_b32 s7, s15, s12
+; GCN-NEXT:    s_cselect_b32 s6, s6, s13
+; GCN-NEXT:    s_cmp_lg_u32 s11, 0
+; GCN-NEXT:    s_subb_u32 s8, 0, s8
+; GCN-NEXT:    s_cmp_ge_u32 s8, s5
+; GCN-NEXT:    s_cselect_b32 s9, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s10, s4
+; GCN-NEXT:    s_cselect_b32 s4, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s8, s5
+; GCN-NEXT:    s_cselect_b32 s4, s4, s9
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_cselect_b32 s4, s6, s8
+; GCN-NEXT:    s_cselect_b32 s5, s7, s10
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_srem_k_num_i64:
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index f144d36589894..6efce53aaa5fe 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -50,7 +50,7 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; GCN-NEXT:    v_mul_lo_u32 v3, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
+; GCN-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
 ; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
 ; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
 ; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
@@ -787,104 +787,137 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GCN-NEXT:    s_sub_u32 s4, 0, s2
-; GCN-NEXT:    s_subb_u32 s5, 0, s3
+; GCN-NEXT:    s_sub_u32 s6, 0, s2
+; GCN-NEXT:    s_subb_u32 s8, 0, s3
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s5, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s4, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s4, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s5, v0
-; GCN-NEXT:    s_mov_b32 s5, s1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s4, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, 24
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
-; GCN-NEXT:    v_mov_b32_e32 v4, s3
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v2, s6, v0
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NEXT:    s_mul_i32 s5, s6, s9
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_mul_i32 s10, s8, s4
+; GCN-NEXT:    s_mul_i32 s11, s6, s4
+; GCN-NEXT:    s_add_i32 s5, s12, s5
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s11
+; GCN-NEXT:    s_add_i32 s5, s5, s10
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s5
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s11
+; GCN-NEXT:    v_readfirstlane_b32 s10, v3
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s5
+; GCN-NEXT:    s_mul_i32 s13, s4, s5
+; GCN-NEXT:    s_add_u32 s10, s10, s13
+; GCN-NEXT:    v_readfirstlane_b32 s13, v0
+; GCN-NEXT:    s_mul_i32 s11, s9, s11
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    v_readfirstlane_b32 s12, v4
+; GCN-NEXT:    s_add_u32 s10, s10, s11
+; GCN-NEXT:    v_readfirstlane_b32 s14, v1
+; GCN-NEXT:    s_addc_u32 s10, s13, s12
+; GCN-NEXT:    s_addc_u32 s11, s14, 0
+; GCN-NEXT:    s_mul_i32 s5, s9, s5
+; GCN-NEXT:    s_add_u32 s5, s10, s5
+; GCN-NEXT:    s_addc_u32 s10, 0, s11
+; GCN-NEXT:    s_add_i32 s11, s4, s5
+; GCN-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s6, v0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_addc_u32 s9, s9, s10
+; GCN-NEXT:    s_mul_i32 s4, s6, s9
+; GCN-NEXT:    v_readfirstlane_b32 s5, v0
+; GCN-NEXT:    s_add_i32 s4, s5, s4
+; GCN-NEXT:    s_mul_i32 s8, s8, s11
+; GCN-NEXT:    s_mul_i32 s5, s6, s11
+; GCN-NEXT:    s_add_i32 s4, s4, s8
+; GCN-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mul_hi_u32 v3, s9, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s9, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    s_mul_i32 s8, s11, s4
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_add_u32 s8, s12, s8
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_mul_i32 s5, s9, s5
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    v_readfirstlane_b32 s6, v3
+; GCN-NEXT:    s_add_u32 s5, s8, s5
+; GCN-NEXT:    s_addc_u32 s5, s10, s6
+; GCN-NEXT:    v_readfirstlane_b32 s6, v1
+; GCN-NEXT:    s_addc_u32 s6, s6, 0
+; GCN-NEXT:    s_mul_i32 s4, s9, s4
+; GCN-NEXT:    s_add_u32 s4, s5, s4
+; GCN-NEXT:    s_addc_u32 s6, 0, s6
+; GCN-NEXT:    s_add_i32 s11, s11, s4
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_addc_u32 s4, s9, s6
+; GCN-NEXT:    v_mul_hi_u32 v1, s11, 24
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, 24
+; GCN-NEXT:    s_mul_i32 s4, s4, 24
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_readfirstlane_b32 s8, v1
+; GCN-NEXT:    v_readfirstlane_b32 s5, v0
+; GCN-NEXT:    s_add_u32 s4, s8, s4
+; GCN-NEXT:    s_addc_u32 s8, 0, s5
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
 ; GCN-NEXT:    s_mov_b32 s4, s0
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s3, v0
-; GCN-NEXT:    v_mul_hi_u32 v2, s2, v0
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT:    v_mul_lo_u32 v2, s2, v0
-; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 24, v2
-; GCN-NEXT:    v_subb_u32_e64 v3, s[0:1], v3, v4, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s2, v2
-; GCN-NEXT:    v_subbrev_u32_e64 v3, s[0:1], 0, v3, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s3, v3
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v5, v4, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v4, s[0:1], 1, v0
-; GCN-NEXT:    v_addc_u32_e64 v5, s[0:1], 0, 0, s[0:1]
-; GCN-NEXT:    v_add_i32_e64 v6, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v7, s[0:1], 0, 0, s[0:1]
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v3
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s3, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v6, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v5, v7, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s2, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v5, v2, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v4, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; GCN-NEXT:    s_mov_b32 s5, s1
+; GCN-NEXT:    s_mul_i32 s0, s3, s8
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_add_i32 s9, s1, s0
+; GCN-NEXT:    s_sub_i32 s10, 0, s9
+; GCN-NEXT:    s_mul_i32 s0, s2, s8
+; GCN-NEXT:    s_sub_i32 s11, 24, s0
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s12, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_subb_u32 s10, s10, s3
+; GCN-NEXT:    s_sub_i32 s13, s11, s2
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_subb_u32 s0, s10, 0
+; GCN-NEXT:    s_cmp_ge_u32 s0, s3
+; GCN-NEXT:    s_cselect_b32 s1, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s2
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s0, s3
+; GCN-NEXT:    s_cselect_b32 s0, s10, s1
+; GCN-NEXT:    s_add_u32 s1, s8, 1
+; GCN-NEXT:    s_addc_u32 s10, 0, 0
+; GCN-NEXT:    s_add_u32 s13, s8, 2
+; GCN-NEXT:    s_addc_u32 s14, 0, 0
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_cselect_b32 s0, s13, s1
+; GCN-NEXT:    s_cselect_b32 s1, s14, s10
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_subb_u32 s9, 0, s9
+; GCN-NEXT:    s_cmp_ge_u32 s9, s3
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s11, s2
+; GCN-NEXT:    s_cselect_b32 s2, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s9, s3
+; GCN-NEXT:    s_cselect_b32 s2, s2, s10
+; GCN-NEXT:    s_cmp_lg_u32 s2, 0
+; GCN-NEXT:    s_cselect_b32 s1, s1, 0
+; GCN-NEXT:    s_cselect_b32 s0, s0, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 15a940f1b1dee..05c003eefa850 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -5,119 +5,159 @@
 define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-LABEL: s_test_urem_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx2 s[12:13], s[4:5], 0xd
-; GCN-NEXT:    s_load_dwordx4 s[8:11], s[4:5], 0x9
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
+; GCN-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s12
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s13
-; GCN-NEXT:    s_sub_u32 s0, 0, s12
-; GCN-NEXT:    s_subb_u32 s1, 0, s13
-; GCN-NEXT:    s_mov_b32 s4, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
+; GCN-NEXT:    s_sub_u32 s10, 0, s8
+; GCN-NEXT:    s_subb_u32 s11, 0, s9
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_mov_b32 s5, s9
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s1, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s0, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s0, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s10, v0
-; GCN-NEXT:    v_mul_hi_u32 v4, s10, v1
-; GCN-NEXT:    v_mul_hi_u32 v5, s11, v1
-; GCN-NEXT:    v_mul_lo_u32 v1, s11, v1
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, s11, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v3, v0, vcc
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s12, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s12, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, s13, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, s12, v0
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s11, v1
-; GCN-NEXT:    v_mov_b32_e32 v3, s13
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, s10, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s12, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s12, v4
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s13, v5
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s12, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GCN-NEXT:    v_mov_b32_e32 v4, s11
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, v4, v1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s13, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s12, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s13, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
+; GCN-NEXT:    v_mul_hi_u32 v2, s10, v0
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    v_readfirstlane_b32 s0, v0
+; GCN-NEXT:    s_mul_i32 s1, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_mul_i32 s13, s11, s0
+; GCN-NEXT:    s_mul_i32 s14, s10, s0
+; GCN-NEXT:    s_add_i32 s1, s15, s1
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s14
+; GCN-NEXT:    s_add_i32 s1, s1, s13
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s14
+; GCN-NEXT:    v_readfirstlane_b32 s13, v3
+; GCN-NEXT:    s_mul_i32 s15, s0, s1
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s1
+; GCN-NEXT:    s_add_u32 s13, s13, s15
+; GCN-NEXT:    v_readfirstlane_b32 s15, v0
+; GCN-NEXT:    s_mul_i32 s14, s12, s14
+; GCN-NEXT:    s_addc_u32 s15, 0, s15
+; GCN-NEXT:    v_readfirstlane_b32 s16, v4
+; GCN-NEXT:    s_add_u32 s13, s13, s14
+; GCN-NEXT:    s_addc_u32 s13, s15, s16
+; GCN-NEXT:    v_readfirstlane_b32 s14, v1
+; GCN-NEXT:    s_addc_u32 s14, s14, 0
+; GCN-NEXT:    s_mul_i32 s1, s12, s1
+; GCN-NEXT:    s_add_u32 s1, s13, s1
+; GCN-NEXT:    s_addc_u32 s13, 0, s14
+; GCN-NEXT:    s_add_i32 s14, s0, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_addc_u32 s12, s12, s13
+; GCN-NEXT:    s_mul_i32 s0, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_add_i32 s0, s1, s0
+; GCN-NEXT:    s_mul_i32 s11, s11, s14
+; GCN-NEXT:    s_mul_i32 s1, s10, s14
+; GCN-NEXT:    s_add_i32 s0, s0, s11
+; GCN-NEXT:    v_mov_b32_e32 v2, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s0
+; GCN-NEXT:    v_mul_hi_u32 v3, s12, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s14, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s12, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s14, v0
+; GCN-NEXT:    s_mul_i32 s11, s14, s0
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_add_u32 s11, s15, s11
+; GCN-NEXT:    v_readfirstlane_b32 s13, v0
+; GCN-NEXT:    s_mul_i32 s1, s12, s1
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    v_readfirstlane_b32 s10, v3
+; GCN-NEXT:    s_add_u32 s1, s11, s1
+; GCN-NEXT:    s_addc_u32 s1, s13, s10
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    s_addc_u32 s10, s10, 0
+; GCN-NEXT:    s_mul_i32 s0, s12, s0
+; GCN-NEXT:    s_add_u32 s0, s1, s0
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    s_add_i32 s11, s14, s0
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_addc_u32 s1, s12, s10
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GCN-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GCN-NEXT:    s_mov_b32 s0, s4
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    v_mul_hi_u32 v1, s7, v2
+; GCN-NEXT:    s_mul_i32 s4, s6, s1
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
+; GCN-NEXT:    s_add_u32 s4, s12, s4
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    s_mul_i32 s11, s7, s11
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    s_add_u32 s4, s4, s11
+; GCN-NEXT:    s_addc_u32 s4, s10, s12
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_addc_u32 s10, s10, 0
+; GCN-NEXT:    s_mul_i32 s1, s7, s1
+; GCN-NEXT:    s_add_u32 s4, s4, s1
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GCN-NEXT:    s_mov_b32 s1, s5
+; GCN-NEXT:    s_addc_u32 s5, 0, s10
+; GCN-NEXT:    s_mul_i32 s5, s8, s5
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_add_i32 s5, s10, s5
+; GCN-NEXT:    s_mul_i32 s10, s9, s4
+; GCN-NEXT:    s_add_i32 s10, s5, s10
+; GCN-NEXT:    s_sub_i32 s11, s7, s10
+; GCN-NEXT:    s_mul_i32 s4, s8, s4
+; GCN-NEXT:    s_sub_i32 s6, s6, s4
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s12, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_subb_u32 s11, s11, s9
+; GCN-NEXT:    s_sub_i32 s13, s6, s8
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_subb_u32 s14, s11, 0
+; GCN-NEXT:    s_cmp_ge_u32 s14, s9
+; GCN-NEXT:    s_cselect_b32 s5, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s8
+; GCN-NEXT:    s_cselect_b32 s15, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s14, s9
+; GCN-NEXT:    s_cselect_b32 s15, s15, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_subb_u32 s11, s11, s9
+; GCN-NEXT:    s_sub_i32 s16, s13, s8
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_subb_u32 s4, s11, 0
+; GCN-NEXT:    s_cmp_lg_u32 s15, 0
+; GCN-NEXT:    s_cselect_b32 s5, s16, s13
+; GCN-NEXT:    s_cselect_b32 s4, s4, s14
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_subb_u32 s7, s7, s10
+; GCN-NEXT:    s_cmp_ge_u32 s7, s9
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s6, s8
+; GCN-NEXT:    s_cselect_b32 s8, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s7, s9
+; GCN-NEXT:    s_cselect_b32 s8, s8, s10
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_cselect_b32 s4, s4, s7
+; GCN-NEXT:    s_cselect_b32 s5, s5, s6
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mov_b32_e32 v1, s4
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_urem_i64:
@@ -763,106 +803,143 @@ define amdgpu_kernel void @s_test_urem23_64_v2i64(ptr addrspace(1) %out, <2 x i6
 define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x) {
 ; GCN-LABEL: s_test_urem_k_num_i64:
 ; GCN:       ; %bb.0:
-; GCN-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
-; GCN-NEXT:    s_mov_b32 s11, 0xf000
-; GCN-NEXT:    s_mov_b32 s10, -1
+; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s6
-; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s7
-; GCN-NEXT:    s_sub_u32 s0, 0, s6
-; GCN-NEXT:    s_subb_u32 s1, 0, s7
-; GCN-NEXT:    s_mov_b32 s8, s4
+; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
+; GCN-NEXT:    s_sub_u32 s6, 0, s2
+; GCN-NEXT:    s_subb_u32 s8, 0, s3
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_mov_b32 s9, s5
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
-; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GCN-NEXT:    v_mul_lo_u32 v5, s1, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s0, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, v4
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v6, v1, v4
-; GCN-NEXT:    v_mul_lo_u32 v4, v1, v4
-; GCN-NEXT:    v_mul_hi_u32 v8, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v5, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s0, v1
-; GCN-NEXT:    v_mul_hi_u32 v3, s0, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s1, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, s0, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v7, v0, v3
-; GCN-NEXT:    v_mul_hi_u32 v8, v0, v2
-; GCN-NEXT:    v_mul_hi_u32 v5, v1, v3
-; GCN-NEXT:    v_mul_lo_u32 v3, v1, v3
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-NEXT:    v_addc_u32_e32 v7, vcc, 0, v8, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, v2
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v7, v5, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, v1, 24
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
-; GCN-NEXT:    v_mov_b32_e32 v3, s7
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s7, v0
+; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_mul_hi_u32 v2, s6, v0
-; GCN-NEXT:    v_mul_lo_u32 v0, s6, v0
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0, v1
-; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
-; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s6, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v5, s[2:3], 0, v2, s[0:1]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s7, v5
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_le_u32_e64 s[2:3], s6, v4
-; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[2:3]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[2:3], s7, v5
-; GCN-NEXT:    v_subrev_i32_e64 v3, s[0:1], s6, v4
-; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v7, s[2:3]
-; GCN-NEXT:    v_subbrev_u32_e64 v2, s[0:1], 0, v2, s[0:1]
-; GCN-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v6
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s7, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v3, v4, v3, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s6, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s7, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v4, v4, v5, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
-; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    v_readfirstlane_b32 s4, v0
+; GCN-NEXT:    s_mul_i32 s5, s6, s9
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_mul_i32 s10, s8, s4
+; GCN-NEXT:    s_mul_i32 s11, s6, s4
+; GCN-NEXT:    s_add_i32 s5, s12, s5
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s11
+; GCN-NEXT:    s_add_i32 s5, s5, s10
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s5
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s11
+; GCN-NEXT:    v_readfirstlane_b32 s10, v3
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s5
+; GCN-NEXT:    s_mul_i32 s13, s4, s5
+; GCN-NEXT:    s_add_u32 s10, s10, s13
+; GCN-NEXT:    v_readfirstlane_b32 s13, v0
+; GCN-NEXT:    s_mul_i32 s11, s9, s11
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    v_readfirstlane_b32 s12, v4
+; GCN-NEXT:    s_add_u32 s10, s10, s11
+; GCN-NEXT:    v_readfirstlane_b32 s14, v1
+; GCN-NEXT:    s_addc_u32 s10, s13, s12
+; GCN-NEXT:    s_addc_u32 s11, s14, 0
+; GCN-NEXT:    s_mul_i32 s5, s9, s5
+; GCN-NEXT:    s_add_u32 s5, s10, s5
+; GCN-NEXT:    s_addc_u32 s10, 0, s11
+; GCN-NEXT:    s_add_i32 s11, s4, s5
+; GCN-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    v_mul_hi_u32 v0, s6, v0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_addc_u32 s9, s9, s10
+; GCN-NEXT:    s_mul_i32 s4, s6, s9
+; GCN-NEXT:    v_readfirstlane_b32 s5, v0
+; GCN-NEXT:    s_add_i32 s4, s5, s4
+; GCN-NEXT:    s_mul_i32 s8, s8, s11
+; GCN-NEXT:    s_mul_i32 s5, s6, s11
+; GCN-NEXT:    s_add_i32 s4, s4, s8
+; GCN-NEXT:    v_mov_b32_e32 v2, s5
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mul_hi_u32 v3, s9, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s9, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    s_mul_i32 s8, s11, s4
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_add_u32 s8, s12, s8
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_mul_i32 s5, s9, s5
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    v_readfirstlane_b32 s6, v3
+; GCN-NEXT:    s_add_u32 s5, s8, s5
+; GCN-NEXT:    s_addc_u32 s5, s10, s6
+; GCN-NEXT:    v_readfirstlane_b32 s6, v1
+; GCN-NEXT:    s_addc_u32 s6, s6, 0
+; GCN-NEXT:    s_mul_i32 s4, s9, s4
+; GCN-NEXT:    s_add_u32 s4, s5, s4
+; GCN-NEXT:    s_addc_u32 s6, 0, s6
+; GCN-NEXT:    s_add_i32 s11, s11, s4
+; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_or_b32 s4, s4, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_addc_u32 s4, s9, s6
+; GCN-NEXT:    v_mul_hi_u32 v1, s11, 24
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, 24
+; GCN-NEXT:    s_mul_i32 s4, s4, 24
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_readfirstlane_b32 s8, v1
+; GCN-NEXT:    v_readfirstlane_b32 s5, v0
+; GCN-NEXT:    s_add_u32 s4, s8, s4
+; GCN-NEXT:    s_addc_u32 s8, 0, s5
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GCN-NEXT:    s_mov_b32 s4, s0
+; GCN-NEXT:    s_mov_b32 s5, s1
+; GCN-NEXT:    s_mul_i32 s0, s3, s8
+; GCN-NEXT:    v_readfirstlane_b32 s1, v0
+; GCN-NEXT:    s_add_i32 s9, s1, s0
+; GCN-NEXT:    s_sub_i32 s10, 0, s9
+; GCN-NEXT:    s_mul_i32 s0, s2, s8
+; GCN-NEXT:    s_sub_i32 s8, 24, s0
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s11, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s11, 0
+; GCN-NEXT:    s_subb_u32 s10, s10, s3
+; GCN-NEXT:    s_sub_i32 s12, s8, s2
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_subb_u32 s13, s10, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s3
+; GCN-NEXT:    s_cselect_b32 s1, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s12, s2
+; GCN-NEXT:    s_cselect_b32 s14, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s13, s3
+; GCN-NEXT:    s_cselect_b32 s14, s14, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_subb_u32 s10, s10, s3
+; GCN-NEXT:    s_sub_i32 s15, s12, s2
+; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_or_b32 s0, s0, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_subb_u32 s0, s10, 0
+; GCN-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-NEXT:    s_cselect_b32 s1, s15, s12
+; GCN-NEXT:    s_cselect_b32 s0, s0, s13
+; GCN-NEXT:    s_cmp_lg_u32 s11, 0
+; GCN-NEXT:    s_subb_u32 s9, 0, s9
+; GCN-NEXT:    s_cmp_ge_u32 s9, s3
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s8, s2
+; GCN-NEXT:    s_cselect_b32 s2, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s9, s3
+; GCN-NEXT:    s_cselect_b32 s2, s2, s10
+; GCN-NEXT:    s_cmp_lg_u32 s2, 0
+; GCN-NEXT:    s_cselect_b32 s0, s0, s9
+; GCN-NEXT:    s_cselect_b32 s1, s1, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
 ; GCN-IR-LABEL: s_test_urem_k_num_i64:
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 2a76d83cd7dac..89f719da21ebf 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -730,19 +730,19 @@ bb:
 define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
 ; GFX1032-LABEL: test_udiv64:
 ; GFX1032:       ; %bb.0: ; %bb
-; GFX1032-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x24
+; GFX1032-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
 ; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
+; GFX1032-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
 ; GFX1032-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT:    s_or_b64 s[8:9], s[6:7], s[4:5]
-; GFX1032-NEXT:    s_mov_b32 s8, 0
-; GFX1032-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX1032-NEXT:    s_or_b64 s[4:5], s[2:3], s[0:1]
+; GFX1032-NEXT:    s_mov_b32 s4, 0
+; GFX1032-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX1032-NEXT:    s_cbranch_scc0 .LBB15_4
 ; GFX1032-NEXT:  ; %bb.1:
-; GFX1032-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1032-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GFX1032-NEXT:    s_sub_u32 s9, 0, s4
-; GFX1032-NEXT:    s_subb_u32 s10, 0, s5
+; GFX1032-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GFX1032-NEXT:    v_cvt_f32_u32_e32 v1, s1
+; GFX1032-NEXT:    s_sub_u32 s9, 0, s0
+; GFX1032-NEXT:    s_subb_u32 s10, 0, s1
 ; GFX1032-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX1032-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX1032-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -751,160 +751,158 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
 ; GFX1032-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GFX1032-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX1032-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1032-NEXT:    v_readfirstlane_b32 s0, v1
-; GFX1032-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT:    s_mul_i32 s11, s9, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s9, s1
-; GFX1032-NEXT:    s_mul_i32 s12, s10, s1
+; GFX1032-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX1032-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX1032-NEXT:    s_mul_i32 s11, s9, s5
+; GFX1032-NEXT:    s_mul_hi_u32 s13, s9, s8
+; GFX1032-NEXT:    s_mul_i32 s12, s10, s8
 ; GFX1032-NEXT:    s_add_i32 s11, s13, s11
-; GFX1032-NEXT:    s_mul_i32 s14, s9, s1
+; GFX1032-NEXT:    s_mul_i32 s14, s9, s8
 ; GFX1032-NEXT:    s_add_i32 s11, s11, s12
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s1, s14
-; GFX1032-NEXT:    s_mul_hi_u32 s15, s0, s14
-; GFX1032-NEXT:    s_mul_i32 s12, s0, s14
-; GFX1032-NEXT:    s_mul_hi_u32 s14, s1, s11
-; GFX1032-NEXT:    s_mul_i32 s1, s1, s11
-; GFX1032-NEXT:    s_mul_hi_u32 s16, s0, s11
-; GFX1032-NEXT:    s_add_u32 s1, s13, s1
-; GFX1032-NEXT:    s_addc_u32 s13, 0, s14
-; GFX1032-NEXT:    s_add_u32 s1, s1, s12
-; GFX1032-NEXT:    s_mul_i32 s11, s0, s11
-; GFX1032-NEXT:    s_addc_u32 s1, s13, s15
-; GFX1032-NEXT:    s_addc_u32 s12, s16, 0
-; GFX1032-NEXT:    s_add_u32 s1, s1, s11
-; GFX1032-NEXT:    s_addc_u32 s11, 0, s12
-; GFX1032-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1032-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1032-NEXT:    s_addc_u32 s0, s0, s11
-; GFX1032-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT:    s_mul_i32 s11, s9, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s12, s9, s1
-; GFX1032-NEXT:    s_mul_i32 s10, s10, s1
-; GFX1032-NEXT:    s_add_i32 s11, s12, s11
-; GFX1032-NEXT:    s_mul_i32 s9, s9, s1
-; GFX1032-NEXT:    s_add_i32 s11, s11, s10
-; GFX1032-NEXT:    s_mul_hi_u32 s12, s0, s9
-; GFX1032-NEXT:    s_mul_i32 s13, s0, s9
-; GFX1032-NEXT:    s_mul_hi_u32 s9, s1, s9
-; GFX1032-NEXT:    s_mul_hi_u32 s14, s1, s11
-; GFX1032-NEXT:    s_mul_i32 s1, s1, s11
-; GFX1032-NEXT:    s_mul_hi_u32 s10, s0, s11
-; GFX1032-NEXT:    s_add_u32 s1, s9, s1
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s14
-; GFX1032-NEXT:    s_add_u32 s1, s1, s13
-; GFX1032-NEXT:    s_mul_i32 s11, s0, s11
-; GFX1032-NEXT:    s_addc_u32 s1, s9, s12
-; GFX1032-NEXT:    s_addc_u32 s9, s10, 0
-; GFX1032-NEXT:    s_add_u32 s1, s1, s11
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1032-NEXT:    v_add_co_u32 v0, s1, v0, s1
-; GFX1032-NEXT:    s_cmp_lg_u32 s1, 0
-; GFX1032-NEXT:    s_addc_u32 s0, s0, s9
-; GFX1032-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT:    s_mul_i32 s10, s6, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s9, s6, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s11, s7, s0
-; GFX1032-NEXT:    s_mul_i32 s0, s7, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s12, s6, s1
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s7, s1
-; GFX1032-NEXT:    s_mul_i32 s1, s7, s1
-; GFX1032-NEXT:    s_add_u32 s10, s12, s10
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1032-NEXT:    s_add_u32 s1, s10, s1
-; GFX1032-NEXT:    s_addc_u32 s1, s9, s13
-; GFX1032-NEXT:    s_addc_u32 s9, s11, 0
-; GFX1032-NEXT:    s_add_u32 s1, s1, s0
-; GFX1032-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1032-NEXT:    s_mul_hi_u32 s0, s4, s1
-; GFX1032-NEXT:    s_mul_i32 s11, s4, s9
-; GFX1032-NEXT:    s_mul_i32 s12, s4, s1
-; GFX1032-NEXT:    s_add_i32 s0, s0, s11
-; GFX1032-NEXT:    v_sub_co_u32 v0, s11, s6, s12
-; GFX1032-NEXT:    s_mul_i32 s10, s5, s1
-; GFX1032-NEXT:    s_add_i32 s0, s0, s10
-; GFX1032-NEXT:    v_sub_co_u32 v1, s12, v0, s4
-; GFX1032-NEXT:    s_sub_i32 s10, s7, s0
+; GFX1032-NEXT:    s_mul_hi_u32 s13, s8, s14
+; GFX1032-NEXT:    s_mul_i32 s16, s8, s11
+; GFX1032-NEXT:    s_mul_hi_u32 s15, s5, s14
+; GFX1032-NEXT:    s_mul_i32 s12, s5, s14
+; GFX1032-NEXT:    s_mul_hi_u32 s14, s8, s11
+; GFX1032-NEXT:    s_add_u32 s13, s13, s16
+; GFX1032-NEXT:    s_addc_u32 s14, 0, s14
+; GFX1032-NEXT:    s_mul_hi_u32 s17, s5, s11
+; GFX1032-NEXT:    s_add_u32 s12, s13, s12
+; GFX1032-NEXT:    s_mul_i32 s11, s5, s11
+; GFX1032-NEXT:    s_addc_u32 s12, s14, s15
+; GFX1032-NEXT:    s_addc_u32 s13, s17, 0
+; GFX1032-NEXT:    s_add_u32 s11, s12, s11
+; GFX1032-NEXT:    s_addc_u32 s12, 0, s13
+; GFX1032-NEXT:    s_add_i32 s8, s8, s11
+; GFX1032-NEXT:    s_cselect_b32 s11, 1, 0
+; GFX1032-NEXT:    s_mul_hi_u32 s13, s9, s8
 ; GFX1032-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX1032-NEXT:    s_subb_u32 s10, s10, s5
+; GFX1032-NEXT:    s_mul_i32 s11, s9, s8
+; GFX1032-NEXT:    s_addc_u32 s5, s5, s12
+; GFX1032-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1032-NEXT:    s_mul_i32 s9, s9, s5
+; GFX1032-NEXT:    s_mul_hi_u32 s12, s8, s11
+; GFX1032-NEXT:    s_add_i32 s9, s13, s9
+; GFX1032-NEXT:    s_mul_hi_u32 s13, s5, s11
+; GFX1032-NEXT:    s_add_i32 s9, s9, s10
+; GFX1032-NEXT:    s_mul_i32 s10, s5, s11
+; GFX1032-NEXT:    s_mul_i32 s15, s8, s9
+; GFX1032-NEXT:    s_mul_hi_u32 s14, s8, s9
+; GFX1032-NEXT:    s_add_u32 s12, s12, s15
+; GFX1032-NEXT:    s_addc_u32 s14, 0, s14
+; GFX1032-NEXT:    s_mul_hi_u32 s11, s5, s9
+; GFX1032-NEXT:    s_add_u32 s10, s12, s10
+; GFX1032-NEXT:    s_mul_i32 s9, s5, s9
+; GFX1032-NEXT:    s_addc_u32 s10, s14, s13
+; GFX1032-NEXT:    s_addc_u32 s11, s11, 0
+; GFX1032-NEXT:    s_add_u32 s9, s10, s9
+; GFX1032-NEXT:    s_addc_u32 s10, 0, s11
+; GFX1032-NEXT:    s_add_i32 s8, s8, s9
+; GFX1032-NEXT:    s_cselect_b32 s9, 1, 0
+; GFX1032-NEXT:    s_mul_hi_u32 s11, s2, s8
+; GFX1032-NEXT:    s_cmp_lg_u32 s9, 0
+; GFX1032-NEXT:    s_mul_hi_u32 s9, s3, s8
+; GFX1032-NEXT:    s_addc_u32 s5, s5, s10
+; GFX1032-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1032-NEXT:    s_mul_i32 s12, s2, s5
+; GFX1032-NEXT:    s_mul_hi_u32 s10, s2, s5
+; GFX1032-NEXT:    s_add_u32 s11, s11, s12
+; GFX1032-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1032-NEXT:    s_mul_hi_u32 s13, s3, s5
+; GFX1032-NEXT:    s_add_u32 s8, s11, s8
+; GFX1032-NEXT:    s_mul_i32 s5, s3, s5
+; GFX1032-NEXT:    s_addc_u32 s8, s10, s9
+; GFX1032-NEXT:    s_addc_u32 s9, s13, 0
+; GFX1032-NEXT:    s_add_u32 s5, s8, s5
+; GFX1032-NEXT:    s_addc_u32 s8, 0, s9
+; GFX1032-NEXT:    s_mul_hi_u32 s9, s0, s5
+; GFX1032-NEXT:    s_mul_i32 s10, s0, s8
+; GFX1032-NEXT:    s_mul_i32 s11, s1, s5
+; GFX1032-NEXT:    s_add_i32 s9, s9, s10
+; GFX1032-NEXT:    s_mul_i32 s10, s0, s5
+; GFX1032-NEXT:    s_add_i32 s9, s9, s11
+; GFX1032-NEXT:    s_sub_i32 s11, s3, s9
+; GFX1032-NEXT:    s_sub_i32 s10, s2, s10
+; GFX1032-NEXT:    s_cselect_b32 s12, 1, 0
 ; GFX1032-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX1032-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v1
-; GFX1032-NEXT:    s_subb_u32 s10, s10, 0
-; GFX1032-NEXT:    s_cmp_ge_u32 s10, s5
-; GFX1032-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX1032-NEXT:    s_cselect_b32 s12, -1, 0
-; GFX1032-NEXT:    s_cmp_eq_u32 s10, s5
-; GFX1032-NEXT:    s_cselect_b32 vcc_lo, -1, 0
-; GFX1032-NEXT:    s_add_u32 s10, s1, 1
-; GFX1032-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1032-NEXT:    s_addc_u32 s12, s9, 0
-; GFX1032-NEXT:    s_add_u32 s13, s1, 2
-; GFX1032-NEXT:    s_addc_u32 s14, s9, 0
+; GFX1032-NEXT:    s_subb_u32 s11, s11, s1
+; GFX1032-NEXT:    s_sub_i32 s13, s10, s0
+; GFX1032-NEXT:    s_cselect_b32 s14, 1, 0
+; GFX1032-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX1032-NEXT:    s_subb_u32 s11, s11, 0
+; GFX1032-NEXT:    s_cmp_ge_u32 s11, s1
+; GFX1032-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX1032-NEXT:    s_cmp_ge_u32 s13, s0
+; GFX1032-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX1032-NEXT:    s_cmp_eq_u32 s11, s1
+; GFX1032-NEXT:    s_cselect_b32 s11, s13, s14
+; GFX1032-NEXT:    s_add_u32 s13, s5, 1
+; GFX1032-NEXT:    s_addc_u32 s14, s8, 0
+; GFX1032-NEXT:    s_add_u32 s15, s5, 2
+; GFX1032-NEXT:    s_addc_u32 s16, s8, 0
 ; GFX1032-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX1032-NEXT:    v_cmp_le_u32_e32 vcc_lo, s4, v0
-; GFX1032-NEXT:    s_subb_u32 s0, s7, s0
-; GFX1032-NEXT:    v_mov_b32_e32 v2, s13
-; GFX1032-NEXT:    s_cmp_ge_u32 s0, s5
-; GFX1032-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1032-NEXT:    s_cselect_b32 s7, -1, 0
-; GFX1032-NEXT:    s_cmp_eq_u32 s0, s5
-; GFX1032-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v1
-; GFX1032-NEXT:    s_cselect_b32 s0, -1, 0
-; GFX1032-NEXT:    v_mov_b32_e32 v1, s14
-; GFX1032-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX1032-NEXT:    v_cndmask_b32_e32 v2, s10, v2, vcc_lo
-; GFX1032-NEXT:    v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1032-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT:    v_cndmask_b32_e32 v1, s9, v1, vcc_lo
-; GFX1032-NEXT:    v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX1032-NEXT:    s_andn2_b32 vcc_lo, exec_lo, s8
+; GFX1032-NEXT:    s_cselect_b32 s11, s15, s13
+; GFX1032-NEXT:    s_cselect_b32 s13, s16, s14
+; GFX1032-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX1032-NEXT:    s_subb_u32 s3, s3, s9
+; GFX1032-NEXT:    s_cmp_ge_u32 s3, s1
+; GFX1032-NEXT:    s_cselect_b32 s9, -1, 0
+; GFX1032-NEXT:    s_cmp_ge_u32 s10, s0
+; GFX1032-NEXT:    s_cselect_b32 s10, -1, 0
+; GFX1032-NEXT:    s_cmp_eq_u32 s3, s1
+; GFX1032-NEXT:    s_cselect_b32 s1, s10, s9
+; GFX1032-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX1032-NEXT:    s_cselect_b32 s9, s13, s8
+; GFX1032-NEXT:    s_cselect_b32 s8, s11, s5
+; GFX1032-NEXT:    s_andn2_b32 vcc_lo, exec_lo, s4
 ; GFX1032-NEXT:    s_cbranch_vccnz .LBB15_3
 ; GFX1032-NEXT:  .LBB15_2:
-; GFX1032-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1032-NEXT:    s_sub_i32 s1, 0, s4
+; GFX1032-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GFX1032-NEXT:    s_sub_i32 s3, 0, s0
+; GFX1032-NEXT:    s_mov_b32 s9, 0
 ; GFX1032-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX1032-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX1032-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1032-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1032-NEXT:    s_mul_i32 s1, s1, s0
-; GFX1032-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1032-NEXT:    s_add_i32 s0, s0, s1
-; GFX1032-NEXT:    s_mul_hi_u32 s0, s6, s0
-; GFX1032-NEXT:    s_mul_i32 s1, s0, s4
-; GFX1032-NEXT:    s_add_i32 s5, s0, 1
-; GFX1032-NEXT:    s_sub_i32 s1, s6, s1
-; GFX1032-NEXT:    s_sub_i32 s6, s1, s4
-; GFX1032-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1032-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1032-NEXT:    s_cselect_b32 s1, s6, s1
-; GFX1032-NEXT:    s_add_i32 s5, s0, 1
-; GFX1032-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1032-NEXT:    s_mov_b32 s1, 0
-; GFX1032-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1032-NEXT:    v_mov_b32_e32 v0, s0
-; GFX1032-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1032-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX1032-NEXT:    s_mul_i32 s3, s3, s1
+; GFX1032-NEXT:    s_mul_hi_u32 s3, s1, s3
+; GFX1032-NEXT:    s_add_i32 s1, s1, s3
+; GFX1032-NEXT:    s_mul_hi_u32 s1, s2, s1
+; GFX1032-NEXT:    s_mul_i32 s3, s1, s0
+; GFX1032-NEXT:    s_sub_i32 s2, s2, s3
+; GFX1032-NEXT:    s_add_i32 s3, s1, 1
+; GFX1032-NEXT:    s_sub_i32 s4, s2, s0
+; GFX1032-NEXT:    s_cmp_ge_u32 s2, s0
+; GFX1032-NEXT:    s_cselect_b32 s1, s3, s1
+; GFX1032-NEXT:    s_cselect_b32 s2, s4, s2
+; GFX1032-NEXT:    s_add_i32 s3, s1, 1
+; GFX1032-NEXT:    s_cmp_ge_u32 s2, s0
+; GFX1032-NEXT:    s_cselect_b32 s8, s3, s1
 ; GFX1032-NEXT:  .LBB15_3:
+; GFX1032-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX1032-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1032-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3] offset:16
+; GFX1032-NEXT:    v_mov_b32_e32 v1, s9
+; GFX1032-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7] offset:16
 ; GFX1032-NEXT:    s_endpgm
 ; GFX1032-NEXT:  .LBB15_4:
-; GFX1032-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX1032-NEXT:    ; implicit-def: $sgpr8_sgpr9
 ; GFX1032-NEXT:    s_branch .LBB15_2
 ;
 ; GFX1064-LABEL: test_udiv64:
 ; GFX1064:       ; %bb.0: ; %bb
-; GFX1064-NEXT:    s_load_dwordx2 s[2:3], s[4:5], 0x24
+; GFX1064-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x24
 ; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT:    s_load_dwordx4 s[4:7], s[2:3], 0x0
+; GFX1064-NEXT:    s_load_dwordx4 s[0:3], s[6:7], 0x0
 ; GFX1064-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT:    s_or_b64 s[0:1], s[6:7], s[4:5]
-; GFX1064-NEXT:    s_mov_b32 s0, 0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1064-NEXT:    s_or_b64 s[4:5], s[2:3], s[0:1]
+; GFX1064-NEXT:    s_mov_b32 s4, 0
+; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX1064-NEXT:    s_cbranch_scc0 .LBB15_4
 ; GFX1064-NEXT:  ; %bb.1:
-; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1064-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GFX1064-NEXT:    s_sub_u32 s9, 0, s4
-; GFX1064-NEXT:    s_subb_u32 s10, 0, s5
+; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GFX1064-NEXT:    v_cvt_f32_u32_e32 v1, s1
+; GFX1064-NEXT:    s_sub_u32 s9, 0, s0
+; GFX1064-NEXT:    s_subb_u32 s10, 0, s1
 ; GFX1064-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX1064-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX1064-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -914,141 +912,139 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
 ; GFX1064-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX1064-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX1064-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX1064-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT:    s_mul_i32 s1, s9, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s9, s0
-; GFX1064-NEXT:    s_mul_i32 s11, s10, s0
-; GFX1064-NEXT:    s_add_i32 s1, s12, s1
-; GFX1064-NEXT:    s_mul_i32 s13, s9, s0
-; GFX1064-NEXT:    s_add_i32 s1, s1, s11
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s0, s13
+; GFX1064-NEXT:    v_readfirstlane_b32 s4, v0
+; GFX1064-NEXT:    s_mul_i32 s5, s9, s8
+; GFX1064-NEXT:    s_mul_hi_u32 s12, s9, s4
+; GFX1064-NEXT:    s_mul_i32 s11, s10, s4
+; GFX1064-NEXT:    s_add_i32 s5, s12, s5
+; GFX1064-NEXT:    s_mul_i32 s13, s9, s4
+; GFX1064-NEXT:    s_add_i32 s5, s5, s11
+; GFX1064-NEXT:    s_mul_hi_u32 s12, s4, s13
+; GFX1064-NEXT:    s_mul_i32 s15, s4, s5
 ; GFX1064-NEXT:    s_mul_hi_u32 s14, s8, s13
 ; GFX1064-NEXT:    s_mul_i32 s11, s8, s13
-; GFX1064-NEXT:    s_mul_hi_u32 s13, s0, s1
-; GFX1064-NEXT:    s_mul_i32 s0, s0, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s15, s8, s1
-; GFX1064-NEXT:    s_add_u32 s0, s12, s0
-; GFX1064-NEXT:    s_addc_u32 s12, 0, s13
-; GFX1064-NEXT:    s_add_u32 s0, s0, s11
-; GFX1064-NEXT:    s_mul_i32 s1, s8, s1
-; GFX1064-NEXT:    s_addc_u32 s0, s12, s14
-; GFX1064-NEXT:    s_addc_u32 s11, s15, 0
-; GFX1064-NEXT:    s_add_u32 s0, s0, s1
-; GFX1064-NEXT:    s_addc_u32 s11, 0, s11
-; GFX1064-NEXT:    v_add_co_u32 v0, s[0:1], v0, s0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX1064-NEXT:    s_mul_hi_u32 s13, s4, s5
+; GFX1064-NEXT:    s_add_u32 s12, s12, s15
+; GFX1064-NEXT:    s_addc_u32 s13, 0, s13
+; GFX1064-NEXT:    s_mul_hi_u32 s16, s8, s5
+; GFX1064-NEXT:    s_add_u32 s11, s12, s11
+; GFX1064-NEXT:    s_mul_i32 s5, s8, s5
+; GFX1064-NEXT:    s_addc_u32 s11, s13, s14
+; GFX1064-NEXT:    s_addc_u32 s12, s16, 0
+; GFX1064-NEXT:    s_add_u32 s5, s11, s5
+; GFX1064-NEXT:    s_addc_u32 s11, 0, s12
+; GFX1064-NEXT:    s_add_i32 s12, s4, s5
+; GFX1064-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX1064-NEXT:    s_mul_hi_u32 s13, s9, s12
+; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064-NEXT:    s_mul_i32 s4, s9, s12
 ; GFX1064-NEXT:    s_addc_u32 s8, s8, s11
-; GFX1064-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT:    s_mul_i32 s1, s9, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s11, s9, s0
-; GFX1064-NEXT:    s_mul_i32 s10, s10, s0
-; GFX1064-NEXT:    s_add_i32 s1, s11, s1
-; GFX1064-NEXT:    s_mul_i32 s9, s9, s0
-; GFX1064-NEXT:    s_add_i32 s1, s1, s10
-; GFX1064-NEXT:    s_mul_hi_u32 s11, s8, s9
-; GFX1064-NEXT:    s_mul_i32 s12, s8, s9
-; GFX1064-NEXT:    s_mul_hi_u32 s9, s0, s9
-; GFX1064-NEXT:    s_mul_hi_u32 s13, s0, s1
-; GFX1064-NEXT:    s_mul_i32 s0, s0, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s10, s8, s1
-; GFX1064-NEXT:    s_add_u32 s0, s9, s0
-; GFX1064-NEXT:    s_addc_u32 s9, 0, s13
-; GFX1064-NEXT:    s_add_u32 s0, s0, s12
-; GFX1064-NEXT:    s_mul_i32 s1, s8, s1
-; GFX1064-NEXT:    s_addc_u32 s0, s9, s11
-; GFX1064-NEXT:    s_addc_u32 s9, s10, 0
-; GFX1064-NEXT:    s_add_u32 s0, s0, s1
+; GFX1064-NEXT:    s_mul_i32 s10, s10, s12
+; GFX1064-NEXT:    s_mul_i32 s9, s9, s8
+; GFX1064-NEXT:    s_mul_hi_u32 s5, s12, s4
+; GFX1064-NEXT:    s_add_i32 s9, s13, s9
+; GFX1064-NEXT:    s_mul_hi_u32 s11, s8, s4
+; GFX1064-NEXT:    s_add_i32 s9, s9, s10
+; GFX1064-NEXT:    s_mul_i32 s4, s8, s4
+; GFX1064-NEXT:    s_mul_i32 s14, s12, s9
+; GFX1064-NEXT:    s_mul_hi_u32 s13, s12, s9
+; GFX1064-NEXT:    s_add_u32 s5, s5, s14
+; GFX1064-NEXT:    s_addc_u32 s13, 0, s13
+; GFX1064-NEXT:    s_mul_hi_u32 s10, s8, s9
+; GFX1064-NEXT:    s_add_u32 s4, s5, s4
+; GFX1064-NEXT:    s_mul_i32 s9, s8, s9
+; GFX1064-NEXT:    s_addc_u32 s4, s13, s11
+; GFX1064-NEXT:    s_addc_u32 s5, s10, 0
+; GFX1064-NEXT:    s_add_u32 s4, s4, s9
+; GFX1064-NEXT:    s_addc_u32 s9, 0, s5
+; GFX1064-NEXT:    s_add_i32 s12, s12, s4
+; GFX1064-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX1064-NEXT:    s_mul_hi_u32 s10, s2, s12
+; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064-NEXT:    s_mul_hi_u32 s4, s3, s12
+; GFX1064-NEXT:    s_addc_u32 s5, s8, s9
+; GFX1064-NEXT:    s_mul_i32 s8, s3, s12
+; GFX1064-NEXT:    s_mul_i32 s11, s2, s5
+; GFX1064-NEXT:    s_mul_hi_u32 s9, s2, s5
+; GFX1064-NEXT:    s_add_u32 s10, s10, s11
 ; GFX1064-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1064-NEXT:    v_add_co_u32 v0, s[0:1], v0, s0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    s_addc_u32 s0, s8, s9
-; GFX1064-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX1064-NEXT:    s_mul_i32 s9, s6, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s8, s6, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s10, s7, s0
-; GFX1064-NEXT:    s_mul_i32 s0, s7, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s11, s6, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s7, s1
-; GFX1064-NEXT:    s_mul_i32 s1, s7, s1
-; GFX1064-NEXT:    s_add_u32 s9, s11, s9
-; GFX1064-NEXT:    s_addc_u32 s8, 0, s8
-; GFX1064-NEXT:    s_add_u32 s1, s9, s1
-; GFX1064-NEXT:    s_addc_u32 s1, s8, s12
-; GFX1064-NEXT:    s_addc_u32 s8, s10, 0
-; GFX1064-NEXT:    s_add_u32 s10, s1, s0
+; GFX1064-NEXT:    s_mul_hi_u32 s12, s3, s5
+; GFX1064-NEXT:    s_add_u32 s8, s10, s8
+; GFX1064-NEXT:    s_mul_i32 s5, s3, s5
+; GFX1064-NEXT:    s_addc_u32 s4, s9, s4
+; GFX1064-NEXT:    s_addc_u32 s8, s12, 0
+; GFX1064-NEXT:    s_add_u32 s10, s4, s5
 ; GFX1064-NEXT:    s_addc_u32 s11, 0, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s0, s4, s10
-; GFX1064-NEXT:    s_mul_i32 s1, s4, s11
-; GFX1064-NEXT:    s_mul_i32 s9, s4, s10
-; GFX1064-NEXT:    s_add_i32 s12, s0, s1
-; GFX1064-NEXT:    v_sub_co_u32 v0, s[0:1], s6, s9
-; GFX1064-NEXT:    s_mul_i32 s8, s5, s10
-; GFX1064-NEXT:    s_add_i32 s12, s12, s8
-; GFX1064-NEXT:    v_sub_co_u32 v1, s[8:9], v0, s4
-; GFX1064-NEXT:    s_sub_i32 s13, s7, s12
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    s_subb_u32 s13, s13, s5
+; GFX1064-NEXT:    s_mul_hi_u32 s4, s0, s10
+; GFX1064-NEXT:    s_mul_i32 s5, s0, s11
+; GFX1064-NEXT:    s_mul_i32 s8, s1, s10
+; GFX1064-NEXT:    s_add_i32 s4, s4, s5
+; GFX1064-NEXT:    s_add_i32 s12, s4, s8
+; GFX1064-NEXT:    s_mul_i32 s4, s0, s10
+; GFX1064-NEXT:    s_sub_i32 s8, s3, s12
+; GFX1064-NEXT:    s_sub_i32 s13, s2, s4
+; GFX1064-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064-NEXT:    s_subb_u32 s14, s8, s1
+; GFX1064-NEXT:    s_sub_i32 s15, s13, s0
+; GFX1064-NEXT:    s_cselect_b64 s[8:9], 1, 0
 ; GFX1064-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX1064-NEXT:    v_cmp_le_u32_e32 vcc, s4, v1
-; GFX1064-NEXT:    s_subb_u32 s8, s13, 0
-; GFX1064-NEXT:    s_cmp_ge_u32 s8, s5
-; GFX1064-NEXT:    v_cndmask_b32_e64 v1, 0, -1, vcc
+; GFX1064-NEXT:    s_subb_u32 s8, s14, 0
+; GFX1064-NEXT:    s_cmp_ge_u32 s8, s1
 ; GFX1064-NEXT:    s_cselect_b32 s9, -1, 0
-; GFX1064-NEXT:    s_cmp_eq_u32 s8, s5
-; GFX1064-NEXT:    s_cselect_b64 vcc, -1, 0
-; GFX1064-NEXT:    s_add_u32 s8, s10, 1
-; GFX1064-NEXT:    v_cndmask_b32_e32 v1, s9, v1, vcc
-; GFX1064-NEXT:    s_addc_u32 s9, s11, 0
-; GFX1064-NEXT:    s_add_u32 s13, s10, 2
+; GFX1064-NEXT:    s_cmp_ge_u32 s15, s0
+; GFX1064-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX1064-NEXT:    s_cmp_eq_u32 s8, s1
+; GFX1064-NEXT:    s_cselect_b32 s8, s14, s9
+; GFX1064-NEXT:    s_add_u32 s9, s10, 1
 ; GFX1064-NEXT:    s_addc_u32 s14, s11, 0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; GFX1064-NEXT:    v_cmp_le_u32_e32 vcc, s4, v0
-; GFX1064-NEXT:    s_subb_u32 s0, s7, s12
-; GFX1064-NEXT:    v_mov_b32_e32 v2, s13
-; GFX1064-NEXT:    s_cmp_ge_u32 s0, s5
-; GFX1064-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; GFX1064-NEXT:    s_cselect_b32 s7, -1, 0
-; GFX1064-NEXT:    s_cmp_eq_u32 s0, s5
-; GFX1064-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
-; GFX1064-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; GFX1064-NEXT:    v_mov_b32_e32 v1, s14
-; GFX1064-NEXT:    v_cndmask_b32_e64 v0, s7, v0, s[0:1]
-; GFX1064-NEXT:    v_cndmask_b32_e32 v2, s8, v2, vcc
-; GFX1064-NEXT:    v_cndmask_b32_e32 v1, s9, v1, vcc
-; GFX1064-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GFX1064-NEXT:    v_cndmask_b32_e32 v1, s11, v1, vcc
-; GFX1064-NEXT:    v_cndmask_b32_e32 v0, s10, v2, vcc
+; GFX1064-NEXT:    s_add_u32 s15, s10, 2
+; GFX1064-NEXT:    s_addc_u32 s16, s11, 0
+; GFX1064-NEXT:    s_cmp_lg_u32 s8, 0
+; GFX1064-NEXT:    s_cselect_b32 s15, s15, s9
+; GFX1064-NEXT:    s_cselect_b32 s14, s16, s14
+; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX1064-NEXT:    s_subb_u32 s3, s3, s12
+; GFX1064-NEXT:    s_cmp_ge_u32 s3, s1
+; GFX1064-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1064-NEXT:    s_cmp_ge_u32 s13, s0
+; GFX1064-NEXT:    s_cselect_b32 s5, -1, 0
+; GFX1064-NEXT:    s_cmp_eq_u32 s3, s1
+; GFX1064-NEXT:    s_cselect_b32 s1, s5, s4
+; GFX1064-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX1064-NEXT:    s_cselect_b32 s5, s14, s11
+; GFX1064-NEXT:    s_cselect_b32 s4, s15, s10
 ; GFX1064-NEXT:    s_cbranch_execnz .LBB15_3
 ; GFX1064-NEXT:  .LBB15_2:
-; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX1064-NEXT:    s_sub_i32 s1, 0, s4
+; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s0
+; GFX1064-NEXT:    s_sub_i32 s3, 0, s0
+; GFX1064-NEXT:    s_mov_b32 s5, 0
 ; GFX1064-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX1064-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX1064-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1064-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX1064-NEXT:    s_mul_i32 s1, s1, s0
-; GFX1064-NEXT:    s_mul_hi_u32 s1, s0, s1
-; GFX1064-NEXT:    s_add_i32 s0, s0, s1
-; GFX1064-NEXT:    s_mul_hi_u32 s0, s6, s0
-; GFX1064-NEXT:    s_mul_i32 s1, s0, s4
-; GFX1064-NEXT:    s_add_i32 s5, s0, 1
-; GFX1064-NEXT:    s_sub_i32 s1, s6, s1
-; GFX1064-NEXT:    s_sub_i32 s6, s1, s4
-; GFX1064-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1064-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1064-NEXT:    s_cselect_b32 s1, s6, s1
-; GFX1064-NEXT:    s_add_i32 s5, s0, 1
-; GFX1064-NEXT:    s_cmp_ge_u32 s1, s4
-; GFX1064-NEXT:    s_mov_b32 s1, 0
-; GFX1064-NEXT:    s_cselect_b32 s0, s5, s0
-; GFX1064-NEXT:    v_mov_b32_e32 v0, s0
-; GFX1064-NEXT:    v_mov_b32_e32 v1, s1
+; GFX1064-NEXT:    v_readfirstlane_b32 s1, v0
+; GFX1064-NEXT:    s_mul_i32 s3, s3, s1
+; GFX1064-NEXT:    s_mul_hi_u32 s3, s1, s3
+; GFX1064-NEXT:    s_add_i32 s1, s1, s3
+; GFX1064-NEXT:    s_mul_hi_u32 s1, s2, s1
+; GFX1064-NEXT:    s_mul_i32 s3, s1, s0
+; GFX1064-NEXT:    s_sub_i32 s2, s2, s3
+; GFX1064-NEXT:    s_add_i32 s3, s1, 1
+; GFX1064-NEXT:    s_sub_i32 s4, s2, s0
+; GFX1064-NEXT:    s_cmp_ge_u32 s2, s0
+; GFX1064-NEXT:    s_cselect_b32 s1, s3, s1
+; GFX1064-NEXT:    s_cselect_b32 s2, s4, s2
+; GFX1064-NEXT:    s_add_i32 s3, s1, 1
+; GFX1064-NEXT:    s_cmp_ge_u32 s2, s0
+; GFX1064-NEXT:    s_cselect_b32 s4, s3, s1
 ; GFX1064-NEXT:  .LBB15_3:
+; GFX1064-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX1064-NEXT:    v_mov_b32_e32 v2, 0
-; GFX1064-NEXT:    global_store_dwordx2 v2, v[0:1], s[2:3] offset:16
+; GFX1064-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1064-NEXT:    global_store_dwordx2 v2, v[0:1], s[6:7] offset:16
 ; GFX1064-NEXT:    s_endpgm
 ; GFX1064-NEXT:  .LBB15_4:
-; GFX1064-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX1064-NEXT:    ; implicit-def: $sgpr4_sgpr5
 ; GFX1064-NEXT:    s_branch .LBB15_2
 bb:
   %tmp = getelementptr inbounds i64, ptr addrspace(1) %arg, i64 1

>From b28899d76d92e054e13e6bd4c5120b3f5346d6ed Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 15 Sep 2025 10:01:17 -0500
Subject: [PATCH 8/9] Do not generate S_CMP if add/sub carryout is available

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  120 +-
 llvm/test/CodeGen/AMDGPU/addsub64_carry.ll    |  109 +-
 .../AMDGPU/amdgpu-codegenprepare-idiv.ll      | 1908 ++++++++---------
 .../test/CodeGen/AMDGPU/carryout-selection.ll |  715 +++---
 .../expand-scalar-carry-out-select-user.ll    |   12 +-
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            |  434 ++--
 llvm/test/CodeGen/AMDGPU/srem.ll              |  822 ++++---
 llvm/test/CodeGen/AMDGPU/srem64.ll            |  697 +++---
 llvm/test/CodeGen/AMDGPU/uaddo.ll             |  167 +-
 llvm/test/CodeGen/AMDGPU/udiv64.ll            |  320 ++-
 llvm/test/CodeGen/AMDGPU/urem64.ll            |  530 +++--
 llvm/test/CodeGen/AMDGPU/usubo.ll             |  167 +-
 llvm/test/CodeGen/AMDGPU/wave32.ll            |  190 +-
 13 files changed, 2916 insertions(+), 3275 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 504649173f977..fbacccb5efd3a 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5952,9 +5952,9 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     MachineOperand &Src0 = MI.getOperand(2);
     MachineOperand &Src1 = MI.getOperand(3);
     MachineOperand &Src2 = MI.getOperand(4);
-    unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
-                       ? AMDGPU::S_ADDC_U32
-                       : AMDGPU::S_SUBB_U32;
+
+    bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO);
+
     if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) {
       Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
       BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0)
@@ -5978,42 +5978,83 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
     assert(WaveSize == 64 || WaveSize == 32);
 
-    if (WaveSize == 64) {
-      if (ST.hasScalarCompareEq64()) {
-        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
-            .addReg(Src2.getReg())
-            .addImm(0);
-      } else {
-        const TargetRegisterClass *SubRC =
-            TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
-        MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
-            MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
-        MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
-            MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
-        Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
-        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
-            .add(Src2Sub0)
-            .add(Src2Sub1);
+    unsigned SelOpc =
+        (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
+    unsigned AddcSubbOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
+    unsigned AddSubOpc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
+    //  Lowering for:
+    //
+    //    S_UADDO_PSEUDO|S_ADD_CO_PSEUDO
+    //    <no SCC def code>
+    //    S_ADD_CO_PSEUDO
+    //
+    //  produces:
+    //
+    //    S_ADD_I32|S_ADDC_U32                  ; lowered from S_UADDO_PSEUDO
+    //    SREG = S_CSELECT_B32|64 [1,-1], 0     ; lowered from S_UADDO_PSEUDO
+    //    <no SCC def code>
+    //    S_CMP32|64 SREG, 0                    ; lowered from S_ADD_CO_PSEUDO
+    //    S_ADDC_U32                            ; lowered from S_ADD_CO_PSEUDO
+    //
+    //  At this point, before generating the S_CMP, check whether it would be
+    //  redundant.  If so, do not regenerate it.  Subsequent optimizations will
+    //  also delete the dead S_CSELECT*.
+
+    bool RecalculateSCC{true};
+    MachineInstr *Def = MRI.getVRegDef(Src2.getReg());
+    if (Def && Def->getParent() == BB && Def->getOpcode() == SelOpc &&
+        Def->getOperand(1).isImm() && Def->getOperand(1).getImm() != 0 &&
+        Def->getOperand(2).isImm() && Def->getOperand(2).getImm() == 0) {
+
+      auto I1 = std::next(MachineBasicBlock::reverse_iterator(Def));
+      if (I1 != BB->rend() &&
+          (I1->getOpcode() == AddSubOpc || I1->getOpcode() == AddcSubbOpc)) {
+        RecalculateSCC = false;
+        // Ensure there are no intervening definitions of SCC.
+        for (auto I2 = std::next(MachineBasicBlock::reverse_iterator(MI));
+             I2 != I1; I2++) {
+          if (I2->definesRegister(AMDGPU::SCC, TRI)) {
+            RecalculateSCC = true;
+            break;
+          }
+        }
+      }
+    }
+
+    if (RecalculateSCC) {
+      if (WaveSize == 64) {
+        if (ST.hasScalarCompareEq64()) {
+          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
+              .addReg(Src2.getReg())
+              .addImm(0);
+        } else {
+          const TargetRegisterClass *SubRC =
+              TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
+          MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
+              MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
+          MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
+              MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
+          Register Src2_32 =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
 
+          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
+              .add(Src2Sub0)
+              .add(Src2Sub1);
+
+          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
+              .addReg(Src2_32, RegState::Kill)
+              .addImm(0);
+        }
+      } else {
         BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
-            .addReg(Src2_32, RegState::Kill)
+            .addReg(Src2.getReg())
             .addImm(0);
       }
-    } else {
-      BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
-          .addReg(Src2.getReg())
-          .addImm(0);
     }
 
-    // clang-format off
-    BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg())
+    BuildMI(*BB, MII, DL, TII->get(AddcSubbOpc), Dest.getReg())
         .add(Src0)
         .add(Src1);
-    // clang-format on
-
-    unsigned SelOpc =
-        (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
 
     BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
         .addImm(-1)
@@ -16308,17 +16349,12 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
   // LHS = ADD i64 Y, 1            LHSlo = UADDO       i32 Ylo, 1
   // setcc LHS eq 0        ->      LHSHi = UADDO_CARRY i32 Yhi, 0
 
-  // Don't split a 64-bit add/sub into two 32-bit add/sub instructions for
-  // non-divergent operations.  This can result in lo/hi 32-bit operations
-  // being done in SGPR and VGPR with additional operations being needed
-  // to move operands and/or generate the intermediate carry.
-  if (VT == MVT::i64 && N->isDivergent() &&
-      ((CC == ISD::SETULT &&
-        sd_match(LHS, m_Add(m_Specific(RHS), m_Value()))) ||
-       (CC == ISD::SETUGT &&
-        sd_match(LHS, m_Sub(m_Specific(RHS), m_Value()))) ||
-       (CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
-        sd_match(LHS, m_Add(m_Value(), m_One()))))) {
+  if (VT == MVT::i64 && ((CC == ISD::SETULT &&
+                          sd_match(LHS, m_Add(m_Specific(RHS), m_Value()))) ||
+                         (CC == ISD::SETUGT &&
+                          sd_match(LHS, m_Sub(m_Specific(RHS), m_Value()))) ||
+                         (CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
+                          sd_match(LHS, m_Add(m_Value(), m_One()))))) {
     EVT TargetType = MVT::i32;
     EVT CarryVT = MVT::i1;
     bool IsAdd = LHS.getOpcode() == ISD::ADD;
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index 397835972e4d4..9d9fd947d2817 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -179,15 +179,9 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B, i32 inreg %val32) {
 ; CHECK-LABEL: s_add64_32:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s6, s0, s2
-; CHECK-NEXT:    v_mov_b32_e32 v0, s0
-; CHECK-NEXT:    s_addc_u32 s7, s1, s3
-; CHECK-NEXT:    v_mov_b32_e32 v1, s1
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CHECK-NEXT:    s_mov_b32 s0, s6
-; CHECK-NEXT:    s_cmp_lg_u64 vcc, 0
+; CHECK-NEXT:    s_add_i32 s0, s0, s2
+; CHECK-NEXT:    s_addc_u32 s1, s1, s3
 ; CHECK-NEXT:    s_addc_u32 s2, s4, 0
-; CHECK-NEXT:    s_mov_b32 s1, s7
 ; CHECK-NEXT:    ; return to shader part epilog
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -201,22 +195,18 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
 define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_v2i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s6, s2, s6
-; CHECK-NEXT:    v_mov_b32_e32 v9, s3
+; CHECK-NEXT:    s_add_i32 s6, s2, s6
 ; CHECK-NEXT:    s_addc_u32 s7, s3, s7
-; CHECK-NEXT:    v_mov_b32_e32 v8, s2
-; CHECK-NEXT:    s_add_u32 s4, s0, s4
-; CHECK-NEXT:    v_mov_b32_e32 v7, s1
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT:    s_addc_u32 s5, s1, s5
-; CHECK-NEXT:    v_mov_b32_e32 v6, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT:    v_readfirstlane_b32 s2, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT:    v_readfirstlane_b32 s0, v6
-; CHECK-NEXT:    v_mov_b32_e32 v2, s4
-; CHECK-NEXT:    v_mov_b32_e32 v3, s5
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_add_i32 s0, s0, s4
+; CHECK-NEXT:    s_addc_u32 s1, s1, s5
+; CHECK-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v7
+; CHECK-NEXT:    v_readfirstlane_b32 s2, v6
 ; CHECK-NEXT:    v_mov_b32_e32 v4, s6
 ; CHECK-NEXT:    v_mov_b32_e32 v5, s7
 ; CHECK-NEXT:    s_mov_b32 s1, s0
@@ -235,22 +225,18 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_v2i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_sub_u32 s6, s2, s6
-; CHECK-NEXT:    v_mov_b32_e32 v9, s3
+; CHECK-NEXT:    s_sub_i32 s6, s2, s6
 ; CHECK-NEXT:    s_subb_u32 s7, s3, s7
-; CHECK-NEXT:    v_mov_b32_e32 v8, s2
-; CHECK-NEXT:    s_sub_u32 s4, s0, s4
-; CHECK-NEXT:    v_mov_b32_e32 v7, s1
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT:    s_subb_u32 s5, s1, s5
-; CHECK-NEXT:    v_mov_b32_e32 v6, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT:    v_readfirstlane_b32 s2, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT:    v_readfirstlane_b32 s0, v6
-; CHECK-NEXT:    v_mov_b32_e32 v2, s4
-; CHECK-NEXT:    v_mov_b32_e32 v3, s5
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_sub_i32 s0, s0, s4
+; CHECK-NEXT:    s_subb_u32 s1, s1, s5
+; CHECK-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v7
+; CHECK-NEXT:    v_readfirstlane_b32 s2, v6
 ; CHECK-NEXT:    v_mov_b32_e32 v4, s6
 ; CHECK-NEXT:    v_mov_b32_e32 v5, s7
 ; CHECK-NEXT:    s_mov_b32 s1, s0
@@ -269,15 +255,13 @@ define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s2, s0, s2
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
-; CHECK-NEXT:    s_addc_u32 s3, s1, s3
+; CHECK-NEXT:    s_add_i32 s0, s0, s2
+; CHECK-NEXT:    s_addc_u32 s1, s1, s3
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
-; CHECK-NEXT:    v_mov_b32_e32 v5, s3
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v4, s2
-; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -293,11 +277,10 @@ define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval)
 define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_p1:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s0, s0, 1
+; CHECK-NEXT:    s_add_i32 s0, s0, 1
 ; CHECK-NEXT:    s_addc_u32 s1, s1, 0
-; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
 ; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
@@ -339,15 +322,13 @@ define amdgpu_ps i64 @s_uadd_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_p1:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s2, s0, -1
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
-; CHECK-NEXT:    s_addc_u32 s3, s1, -1
+; CHECK-NEXT:    s_sub_i32 s0, s0, 1
+; CHECK-NEXT:    s_subb_u32 s1, s1, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
-; CHECK-NEXT:    v_mov_b32_e32 v5, s3
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v4, s2
-; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -363,15 +344,13 @@ define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_n1:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s2, s0, 1
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
-; CHECK-NEXT:    s_addc_u32 s3, s1, 0
+; CHECK-NEXT:    s_sub_i32 s0, s0, -1
+; CHECK-NEXT:    s_subb_u32 s1, s1, -1
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
-; CHECK-NEXT:    v_mov_b32_e32 v5, s3
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v4, s2
-; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
index e68353e5223fb..7413bbc4dbdd2 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-idiv.ll
@@ -7794,7 +7794,6 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xd
 ; GFX6-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
-; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    s_lshl_b64 s[0:1], 0x1000, s0
 ; GFX6-NEXT:    s_ashr_i32 s8, s1, 31
@@ -7804,8 +7803,8 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_xor_b64 s[10:11], s[0:1], s[8:9]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s10
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s11
-; GFX6-NEXT:    s_sub_u32 s12, 0, s10
-; GFX6-NEXT:    s_subb_u32 s13, 0, s11
+; GFX6-NEXT:    s_sub_u32 s0, 0, s10
+; GFX6-NEXT:    s_subb_u32 s1, 0, s11
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -7814,95 +7813,89 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v0
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
-; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX6-NEXT:    s_mul_i32 s1, s12, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
-; GFX6-NEXT:    s_mul_i32 s15, s13, s0
-; GFX6-NEXT:    s_mul_i32 s16, s12, s0
-; GFX6-NEXT:    s_add_i32 s1, s17, s1
-; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s16
-; GFX6-NEXT:    s_add_i32 s1, s1, s15
-; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s1
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s16
-; GFX6-NEXT:    v_readfirstlane_b32 s15, v3
-; GFX6-NEXT:    s_mul_i32 s17, s0, s1
-; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s1
-; GFX6-NEXT:    s_add_u32 s15, s15, s17
-; GFX6-NEXT:    v_readfirstlane_b32 s17, v0
-; GFX6-NEXT:    s_addc_u32 s17, 0, s17
-; GFX6-NEXT:    s_mul_i32 s16, s14, s16
-; GFX6-NEXT:    v_readfirstlane_b32 s18, v4
-; GFX6-NEXT:    s_add_u32 s15, s15, s16
-; GFX6-NEXT:    s_addc_u32 s15, s17, s18
-; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
-; GFX6-NEXT:    s_addc_u32 s16, s16, 0
-; GFX6-NEXT:    s_mul_i32 s1, s14, s1
-; GFX6-NEXT:    s_add_u32 s1, s15, s1
-; GFX6-NEXT:    s_addc_u32 s15, 0, s16
-; GFX6-NEXT:    s_add_i32 s16, s0, s1
-; GFX6-NEXT:    v_mov_b32_e32 v0, s16
-; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
-; GFX6-NEXT:    s_or_b32 s0, s0, s1
-; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX6-NEXT:    s_addc_u32 s14, s14, s15
-; GFX6-NEXT:    s_mul_i32 s0, s12, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX6-NEXT:    s_add_i32 s0, s1, s0
-; GFX6-NEXT:    s_mul_i32 s13, s13, s16
-; GFX6-NEXT:    s_mul_i32 s1, s12, s16
-; GFX6-NEXT:    s_add_i32 s0, s0, s13
-; GFX6-NEXT:    v_mov_b32_e32 v2, s1
-; GFX6-NEXT:    v_mov_b32_e32 v0, s0
-; GFX6-NEXT:    v_mul_hi_u32 v3, s14, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s16, v2
-; GFX6-NEXT:    v_mul_hi_u32 v1, s14, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s16, v0
-; GFX6-NEXT:    s_mul_i32 s13, s16, s0
-; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
-; GFX6-NEXT:    s_add_u32 s13, s17, s13
-; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
-; GFX6-NEXT:    s_mul_i32 s1, s14, s1
-; GFX6-NEXT:    s_addc_u32 s15, 0, s15
-; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
-; GFX6-NEXT:    s_add_u32 s1, s13, s1
-; GFX6-NEXT:    s_addc_u32 s1, s15, s12
+; GFX6-NEXT:    v_mul_hi_u32 v2, s0, v0
 ; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
-; GFX6-NEXT:    s_addc_u32 s12, s12, 0
-; GFX6-NEXT:    s_mul_i32 s0, s14, s0
-; GFX6-NEXT:    s_add_u32 s0, s1, s0
-; GFX6-NEXT:    s_addc_u32 s12, 0, s12
-; GFX6-NEXT:    s_add_i32 s15, s16, s0
-; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GFX6-NEXT:    s_or_b32 s0, s0, s1
-; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX6-NEXT:    s_addc_u32 s14, s14, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX6-NEXT:    s_mul_i32 s13, s0, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v2
+; GFX6-NEXT:    s_mul_i32 s14, s1, s2
+; GFX6-NEXT:    s_mul_i32 s15, s0, s2
+; GFX6-NEXT:    s_add_i32 s13, s16, s13
+; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s15
+; GFX6-NEXT:    s_add_i32 s13, s13, s14
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s13
+; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v3
+; GFX6-NEXT:    s_mul_i32 s16, s2, s13
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s13
+; GFX6-NEXT:    s_add_u32 s14, s14, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
+; GFX6-NEXT:    s_mul_i32 s15, s12, s15
+; GFX6-NEXT:    s_addc_u32 s16, 0, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v4
+; GFX6-NEXT:    s_add_u32 s14, s14, s15
+; GFX6-NEXT:    s_addc_u32 s14, s16, s17
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v1
+; GFX6-NEXT:    s_addc_u32 s15, s15, 0
+; GFX6-NEXT:    s_mul_i32 s13, s12, s13
+; GFX6-NEXT:    s_add_u32 s13, s14, s13
+; GFX6-NEXT:    s_addc_u32 s14, 0, s15
+; GFX6-NEXT:    s_add_i32 s13, s2, s13
+; GFX6-NEXT:    v_mov_b32_e32 v0, s13
+; GFX6-NEXT:    v_mul_hi_u32 v0, s0, v0
+; GFX6-NEXT:    s_addc_u32 s12, s12, s14
+; GFX6-NEXT:    s_mul_i32 s14, s0, s12
+; GFX6-NEXT:    s_mul_i32 s1, s1, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
+; GFX6-NEXT:    s_add_i32 s14, s15, s14
+; GFX6-NEXT:    s_mul_i32 s0, s0, s13
+; GFX6-NEXT:    s_add_i32 s1, s14, s1
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    v_mov_b32_e32 v0, s1
+; GFX6-NEXT:    v_mul_hi_u32 v3, s12, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s13, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s12, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s13, v0
+; GFX6-NEXT:    s_mul_i32 s15, s13, s1
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
+; GFX6-NEXT:    s_add_u32 s15, s17, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
+; GFX6-NEXT:    s_mul_i32 s0, s12, s0
+; GFX6-NEXT:    s_addc_u32 s16, 0, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v3
+; GFX6-NEXT:    s_add_u32 s0, s15, s0
+; GFX6-NEXT:    s_addc_u32 s0, s16, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    s_addc_u32 s14, s14, 0
+; GFX6-NEXT:    s_mul_i32 s1, s12, s1
+; GFX6-NEXT:    s_add_u32 s0, s0, s1
+; GFX6-NEXT:    s_addc_u32 s1, 0, s14
+; GFX6-NEXT:    s_add_i32 s14, s13, s0
+; GFX6-NEXT:    s_addc_u32 s15, s12, s1
 ; GFX6-NEXT:    s_ashr_i32 s12, s7, 31
 ; GFX6-NEXT:    s_add_u32 s0, s6, s12
 ; GFX6-NEXT:    s_mov_b32 s13, s12
 ; GFX6-NEXT:    s_addc_u32 s1, s7, s12
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[12:13]
-; GFX6-NEXT:    v_mov_b32_e32 v0, s14
+; GFX6-NEXT:    v_mov_b32_e32 v0, s15
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s6, v0
-; GFX6-NEXT:    v_mov_b32_e32 v2, s15
+; GFX6-NEXT:    v_mov_b32_e32 v2, s14
 ; GFX6-NEXT:    v_mul_hi_u32 v3, s6, v2
 ; GFX6-NEXT:    s_mov_b32 s0, s4
 ; GFX6-NEXT:    v_readfirstlane_b32 s4, v1
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s7, v2
-; GFX6-NEXT:    s_mul_i32 s1, s6, s14
+; GFX6-NEXT:    s_mul_i32 s1, s6, s15
 ; GFX6-NEXT:    v_readfirstlane_b32 s16, v3
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s7, v0
 ; GFX6-NEXT:    s_add_u32 s1, s16, s1
 ; GFX6-NEXT:    s_addc_u32 s4, 0, s4
-; GFX6-NEXT:    s_mul_i32 s15, s7, s15
+; GFX6-NEXT:    s_mul_i32 s14, s7, s14
 ; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
-; GFX6-NEXT:    s_add_u32 s1, s1, s15
+; GFX6-NEXT:    s_add_u32 s1, s1, s14
 ; GFX6-NEXT:    s_addc_u32 s1, s4, s16
 ; GFX6-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX6-NEXT:    s_addc_u32 s4, s4, 0
-; GFX6-NEXT:    s_mul_i32 s14, s7, s14
+; GFX6-NEXT:    s_mul_i32 s14, s7, s15
 ; GFX6-NEXT:    s_add_u32 s14, s1, s14
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s14
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s10, v0
@@ -7917,43 +7910,40 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_mul_i32 s4, s10, s14
 ; GFX6-NEXT:    s_sub_i32 s6, s6, s4
 ; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX6-NEXT:    s_or_b32 s18, s4, s5
-; GFX6-NEXT:    s_cmp_lg_u32 s18, 0
 ; GFX6-NEXT:    s_subb_u32 s17, s17, s11
-; GFX6-NEXT:    s_sub_i32 s19, s6, s10
-; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX6-NEXT:    s_sub_i32 s18, s6, s10
+; GFX6-NEXT:    s_subb_u32 s17, s17, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s17, s11
+; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s18, s10
+; GFX6-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s17, s11
+; GFX6-NEXT:    s_cselect_b32 s17, s18, s19
+; GFX6-NEXT:    s_add_u32 s18, s14, 1
+; GFX6-NEXT:    s_addc_u32 s19, s15, 0
+; GFX6-NEXT:    s_add_u32 s20, s14, 2
+; GFX6-NEXT:    s_addc_u32 s21, s15, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s17, 0
+; GFX6-NEXT:    s_cselect_b32 s17, s20, s18
+; GFX6-NEXT:    s_cselect_b32 s18, s21, s19
 ; GFX6-NEXT:    s_or_b32 s4, s4, s5
 ; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX6-NEXT:    s_subb_u32 s4, s17, 0
+; GFX6-NEXT:    s_subb_u32 s4, s7, s16
 ; GFX6-NEXT:    s_cmp_ge_u32 s4, s11
 ; GFX6-NEXT:    s_cselect_b32 s5, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s19, s10
-; GFX6-NEXT:    s_cselect_b32 s17, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s4, s11
-; GFX6-NEXT:    s_cselect_b32 s4, s17, s5
-; GFX6-NEXT:    s_add_u32 s5, s14, 1
-; GFX6-NEXT:    s_addc_u32 s17, s15, 0
-; GFX6-NEXT:    s_add_u32 s19, s14, 2
-; GFX6-NEXT:    s_addc_u32 s20, s15, 0
-; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX6-NEXT:    s_cselect_b32 s4, s19, s5
-; GFX6-NEXT:    s_cselect_b32 s5, s20, s17
-; GFX6-NEXT:    s_cmp_lg_u32 s18, 0
-; GFX6-NEXT:    s_subb_u32 s7, s7, s16
-; GFX6-NEXT:    s_cmp_ge_u32 s7, s11
-; GFX6-NEXT:    s_cselect_b32 s16, -1, 0
 ; GFX6-NEXT:    s_cmp_ge_u32 s6, s10
 ; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s7, s11
-; GFX6-NEXT:    s_cselect_b32 s6, s6, s16
-; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX6-NEXT:    s_cselect_b32 s5, s5, s15
-; GFX6-NEXT:    s_cselect_b32 s4, s4, s14
+; GFX6-NEXT:    s_cmp_eq_u32 s4, s11
+; GFX6-NEXT:    s_cselect_b32 s4, s6, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX6-NEXT:    s_cselect_b32 s5, s18, s15
+; GFX6-NEXT:    s_cselect_b32 s4, s17, s14
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[12:13], s[8:9]
 ; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
 ; GFX6-NEXT:    s_sub_u32 s4, s4, s6
 ; GFX6-NEXT:    s_subb_u32 s5, s5, s7
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
@@ -7971,8 +7961,8 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s9
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT:    s_sub_u32 s10, 0, s8
-; GFX9-NEXT:    s_subb_u32 s11, 0, s9
+; GFX9-NEXT:    s_sub_u32 s4, 0, s8
+; GFX9-NEXT:    s_subb_u32 s5, 0, s9
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
@@ -7982,106 +7972,99 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_madmk_f32 v1, v2, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s12, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX9-NEXT:    s_mul_i32 s5, s10, s12
-; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s4
-; GFX9-NEXT:    s_mul_i32 s13, s11, s4
-; GFX9-NEXT:    s_add_i32 s5, s14, s5
-; GFX9-NEXT:    s_mul_i32 s15, s10, s4
-; GFX9-NEXT:    s_add_i32 s5, s5, s13
-; GFX9-NEXT:    s_mul_hi_u32 s14, s4, s15
-; GFX9-NEXT:    s_mul_i32 s16, s4, s5
-; GFX9-NEXT:    s_mul_hi_u32 s13, s4, s5
+; GFX9-NEXT:    v_readfirstlane_b32 s10, v2
+; GFX9-NEXT:    v_readfirstlane_b32 s11, v1
+; GFX9-NEXT:    s_mul_i32 s12, s4, s10
+; GFX9-NEXT:    s_mul_hi_u32 s14, s4, s11
+; GFX9-NEXT:    s_mul_i32 s13, s5, s11
+; GFX9-NEXT:    s_add_i32 s12, s14, s12
+; GFX9-NEXT:    s_mul_i32 s15, s4, s11
+; GFX9-NEXT:    s_add_i32 s12, s12, s13
+; GFX9-NEXT:    s_mul_hi_u32 s14, s11, s15
+; GFX9-NEXT:    s_mul_i32 s16, s11, s12
+; GFX9-NEXT:    s_mul_hi_u32 s13, s11, s12
 ; GFX9-NEXT:    s_add_u32 s14, s14, s16
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s13
-; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s15
-; GFX9-NEXT:    s_mul_i32 s15, s12, s15
+; GFX9-NEXT:    s_mul_hi_u32 s17, s10, s15
+; GFX9-NEXT:    s_mul_i32 s15, s10, s15
 ; GFX9-NEXT:    s_add_u32 s14, s14, s15
-; GFX9-NEXT:    s_mul_hi_u32 s16, s12, s5
+; GFX9-NEXT:    s_mul_hi_u32 s16, s10, s12
 ; GFX9-NEXT:    s_addc_u32 s13, s13, s17
 ; GFX9-NEXT:    s_addc_u32 s14, s16, 0
-; GFX9-NEXT:    s_mul_i32 s5, s12, s5
-; GFX9-NEXT:    s_add_u32 s5, s13, s5
+; GFX9-NEXT:    s_mul_i32 s12, s10, s12
+; GFX9-NEXT:    s_add_u32 s12, s13, s12
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s14
-; GFX9-NEXT:    s_add_i32 s14, s4, s5
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s12, s12, s13
-; GFX9-NEXT:    s_mul_i32 s4, s10, s12
-; GFX9-NEXT:    s_mul_hi_u32 s5, s10, s14
-; GFX9-NEXT:    s_add_i32 s4, s5, s4
-; GFX9-NEXT:    s_mul_i32 s11, s11, s14
-; GFX9-NEXT:    s_add_i32 s4, s4, s11
-; GFX9-NEXT:    s_mul_i32 s10, s10, s14
-; GFX9-NEXT:    s_mul_hi_u32 s11, s12, s10
-; GFX9-NEXT:    s_mul_i32 s13, s12, s10
-; GFX9-NEXT:    s_mul_i32 s16, s14, s4
-; GFX9-NEXT:    s_mul_hi_u32 s10, s14, s10
-; GFX9-NEXT:    s_mul_hi_u32 s15, s14, s4
-; GFX9-NEXT:    s_add_u32 s10, s10, s16
+; GFX9-NEXT:    s_add_i32 s11, s11, s12
+; GFX9-NEXT:    s_addc_u32 s10, s10, s13
+; GFX9-NEXT:    s_mul_i32 s12, s4, s10
+; GFX9-NEXT:    s_mul_hi_u32 s13, s4, s11
+; GFX9-NEXT:    s_add_i32 s12, s13, s12
+; GFX9-NEXT:    s_mul_i32 s5, s5, s11
+; GFX9-NEXT:    s_add_i32 s12, s12, s5
+; GFX9-NEXT:    s_mul_i32 s4, s4, s11
+; GFX9-NEXT:    s_mul_hi_u32 s13, s10, s4
+; GFX9-NEXT:    s_mul_i32 s14, s10, s4
+; GFX9-NEXT:    s_mul_i32 s16, s11, s12
+; GFX9-NEXT:    s_mul_hi_u32 s4, s11, s4
+; GFX9-NEXT:    s_mul_hi_u32 s15, s11, s12
+; GFX9-NEXT:    s_add_u32 s4, s4, s16
 ; GFX9-NEXT:    s_addc_u32 s15, 0, s15
-; GFX9-NEXT:    s_add_u32 s10, s10, s13
-; GFX9-NEXT:    s_mul_hi_u32 s5, s12, s4
-; GFX9-NEXT:    s_addc_u32 s10, s15, s11
+; GFX9-NEXT:    s_add_u32 s4, s4, s14
+; GFX9-NEXT:    s_mul_hi_u32 s5, s10, s12
+; GFX9-NEXT:    s_addc_u32 s4, s15, s13
 ; GFX9-NEXT:    s_addc_u32 s5, s5, 0
-; GFX9-NEXT:    s_mul_i32 s4, s12, s4
-; GFX9-NEXT:    s_add_u32 s4, s10, s4
-; GFX9-NEXT:    s_addc_u32 s10, 0, s5
-; GFX9-NEXT:    s_add_i32 s14, s14, s4
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s10, s12, s10
+; GFX9-NEXT:    s_mul_i32 s12, s10, s12
+; GFX9-NEXT:    s_add_u32 s4, s4, s12
+; GFX9-NEXT:    s_addc_u32 s5, 0, s5
+; GFX9-NEXT:    s_add_i32 s11, s11, s4
+; GFX9-NEXT:    s_addc_u32 s10, s10, s5
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
 ; GFX9-NEXT:    s_add_u32 s2, s2, s4
 ; GFX9-NEXT:    s_mov_b32 s5, s4
 ; GFX9-NEXT:    s_addc_u32 s3, s3, s4
 ; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
-; GFX9-NEXT:    s_mul_i32 s12, s2, s10
-; GFX9-NEXT:    s_mul_hi_u32 s13, s2, s14
-; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s10
-; GFX9-NEXT:    s_add_u32 s12, s13, s12
-; GFX9-NEXT:    s_addc_u32 s11, 0, s11
-; GFX9-NEXT:    s_mul_hi_u32 s15, s3, s14
-; GFX9-NEXT:    s_mul_i32 s14, s3, s14
-; GFX9-NEXT:    s_add_u32 s12, s12, s14
-; GFX9-NEXT:    s_mul_hi_u32 s13, s3, s10
-; GFX9-NEXT:    s_addc_u32 s11, s11, s15
-; GFX9-NEXT:    s_addc_u32 s12, s13, 0
+; GFX9-NEXT:    s_mul_i32 s13, s2, s10
+; GFX9-NEXT:    s_mul_hi_u32 s14, s2, s11
+; GFX9-NEXT:    s_mul_hi_u32 s12, s2, s10
+; GFX9-NEXT:    s_add_u32 s13, s14, s13
+; GFX9-NEXT:    s_addc_u32 s12, 0, s12
+; GFX9-NEXT:    s_mul_hi_u32 s15, s3, s11
+; GFX9-NEXT:    s_mul_i32 s11, s3, s11
+; GFX9-NEXT:    s_add_u32 s11, s13, s11
+; GFX9-NEXT:    s_mul_hi_u32 s14, s3, s10
+; GFX9-NEXT:    s_addc_u32 s11, s12, s15
+; GFX9-NEXT:    s_addc_u32 s12, s14, 0
 ; GFX9-NEXT:    s_mul_i32 s10, s3, s10
-; GFX9-NEXT:    s_add_u32 s14, s11, s10
-; GFX9-NEXT:    s_addc_u32 s15, 0, s12
-; GFX9-NEXT:    s_mul_i32 s10, s8, s15
-; GFX9-NEXT:    s_mul_hi_u32 s11, s8, s14
+; GFX9-NEXT:    s_add_u32 s13, s11, s10
+; GFX9-NEXT:    s_addc_u32 s12, 0, s12
+; GFX9-NEXT:    s_mul_i32 s10, s8, s12
+; GFX9-NEXT:    s_mul_hi_u32 s11, s8, s13
 ; GFX9-NEXT:    s_add_i32 s10, s11, s10
-; GFX9-NEXT:    s_mul_i32 s11, s9, s14
-; GFX9-NEXT:    s_add_i32 s16, s10, s11
-; GFX9-NEXT:    s_sub_i32 s12, s3, s16
-; GFX9-NEXT:    s_mul_i32 s10, s8, s14
+; GFX9-NEXT:    s_mul_i32 s11, s9, s13
+; GFX9-NEXT:    s_add_i32 s14, s10, s11
+; GFX9-NEXT:    s_sub_i32 s15, s3, s14
+; GFX9-NEXT:    s_mul_i32 s10, s8, s13
 ; GFX9-NEXT:    s_sub_i32 s2, s2, s10
 ; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s17, s12, s9
-; GFX9-NEXT:    s_sub_i32 s18, s2, s8
-; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT:    s_subb_u32 s12, s17, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s12, s9
-; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s18, s8
+; GFX9-NEXT:    s_subb_u32 s15, s15, s9
+; GFX9-NEXT:    s_sub_i32 s16, s2, s8
+; GFX9-NEXT:    s_subb_u32 s15, s15, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s15, s9
 ; GFX9-NEXT:    s_cselect_b32 s17, -1, 0
-; GFX9-NEXT:    s_cmp_eq_u32 s12, s9
-; GFX9-NEXT:    s_cselect_b32 s12, s17, s13
-; GFX9-NEXT:    s_add_u32 s13, s14, 1
-; GFX9-NEXT:    s_addc_u32 s17, s15, 0
-; GFX9-NEXT:    s_add_u32 s18, s14, 2
-; GFX9-NEXT:    s_addc_u32 s19, s15, 0
-; GFX9-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX9-NEXT:    s_cselect_b32 s12, s18, s13
-; GFX9-NEXT:    s_cselect_b32 s13, s19, s17
+; GFX9-NEXT:    s_cmp_ge_u32 s16, s8
+; GFX9-NEXT:    s_cselect_b32 s16, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s15, s9
+; GFX9-NEXT:    s_cselect_b32 s15, s16, s17
+; GFX9-NEXT:    s_add_u32 s16, s13, 1
+; GFX9-NEXT:    s_addc_u32 s17, s12, 0
+; GFX9-NEXT:    s_add_u32 s18, s13, 2
+; GFX9-NEXT:    s_addc_u32 s19, s12, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s15, 0
+; GFX9-NEXT:    s_cselect_b32 s15, s18, s16
+; GFX9-NEXT:    s_cselect_b32 s16, s19, s17
 ; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s3, s3, s16
+; GFX9-NEXT:    s_subb_u32 s3, s3, s14
 ; GFX9-NEXT:    s_cmp_ge_u32 s3, s9
 ; GFX9-NEXT:    s_cselect_b32 s10, -1, 0
 ; GFX9-NEXT:    s_cmp_ge_u32 s2, s8
@@ -8089,8 +8072,8 @@ define amdgpu_kernel void @sdiv_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    s_cmp_eq_u32 s3, s9
 ; GFX9-NEXT:    s_cselect_b32 s2, s2, s10
 ; GFX9-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX9-NEXT:    s_cselect_b32 s3, s13, s15
-; GFX9-NEXT:    s_cselect_b32 s2, s12, s14
+; GFX9-NEXT:    s_cselect_b32 s3, s16, s12
+; GFX9-NEXT:    s_cselect_b32 s2, s15, s13
 ; GFX9-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
 ; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
 ; GFX9-NEXT:    s_sub_u32 s2, s2, s4
@@ -8311,8 +8294,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s7
-; GFX6-NEXT:    s_sub_u32 s14, 0, s6
-; GFX6-NEXT:    s_subb_u32 s15, 0, s7
+; GFX6-NEXT:    s_sub_u32 s12, 0, s6
+; GFX6-NEXT:    s_subb_u32 s13, 0, s7
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -8321,71 +8304,65 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s14, v0
-; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
-; GFX6-NEXT:    v_readfirstlane_b32 s12, v0
-; GFX6-NEXT:    s_mul_i32 s13, s14, s16
+; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
+; GFX6-NEXT:    s_mul_i32 s16, s12, s14
 ; GFX6-NEXT:    v_readfirstlane_b32 s19, v2
-; GFX6-NEXT:    s_mul_i32 s17, s15, s12
-; GFX6-NEXT:    s_mul_i32 s18, s14, s12
-; GFX6-NEXT:    s_add_i32 s13, s19, s13
+; GFX6-NEXT:    s_mul_i32 s17, s13, s15
+; GFX6-NEXT:    s_mul_i32 s18, s12, s15
+; GFX6-NEXT:    s_add_i32 s16, s19, s16
 ; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s18
-; GFX6-NEXT:    s_add_i32 s13, s13, s17
-; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s13
+; GFX6-NEXT:    s_add_i32 s16, s16, s17
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s16
 ; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s18
 ; GFX6-NEXT:    v_readfirstlane_b32 s17, v3
-; GFX6-NEXT:    s_mul_i32 s20, s12, s13
-; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s13
+; GFX6-NEXT:    s_mul_i32 s20, s15, s16
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s16
 ; GFX6-NEXT:    s_add_u32 s17, s17, s20
 ; GFX6-NEXT:    v_readfirstlane_b32 s20, v0
-; GFX6-NEXT:    s_mul_i32 s18, s16, s18
+; GFX6-NEXT:    s_mul_i32 s18, s14, s18
 ; GFX6-NEXT:    s_addc_u32 s20, 0, s20
 ; GFX6-NEXT:    v_readfirstlane_b32 s19, v4
 ; GFX6-NEXT:    s_add_u32 s17, s17, s18
 ; GFX6-NEXT:    s_addc_u32 s17, s20, s19
 ; GFX6-NEXT:    v_readfirstlane_b32 s18, v1
 ; GFX6-NEXT:    s_addc_u32 s18, s18, 0
-; GFX6-NEXT:    s_mul_i32 s13, s16, s13
-; GFX6-NEXT:    s_add_u32 s13, s17, s13
+; GFX6-NEXT:    s_mul_i32 s16, s14, s16
+; GFX6-NEXT:    s_add_u32 s16, s17, s16
 ; GFX6-NEXT:    s_addc_u32 s17, 0, s18
-; GFX6-NEXT:    s_add_i32 s18, s12, s13
-; GFX6-NEXT:    v_mov_b32_e32 v0, s18
-; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s14, v0
-; GFX6-NEXT:    s_or_b32 s12, s12, s13
-; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_addc_u32 s16, s16, s17
-; GFX6-NEXT:    s_mul_i32 s12, s14, s16
-; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
-; GFX6-NEXT:    s_add_i32 s12, s13, s12
-; GFX6-NEXT:    s_mul_i32 s15, s15, s18
-; GFX6-NEXT:    s_mul_i32 s13, s14, s18
-; GFX6-NEXT:    s_add_i32 s12, s12, s15
-; GFX6-NEXT:    v_mov_b32_e32 v2, s13
-; GFX6-NEXT:    v_mov_b32_e32 v0, s12
-; GFX6-NEXT:    v_mul_hi_u32 v3, s16, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s18, v2
-; GFX6-NEXT:    v_mul_hi_u32 v1, s16, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s18, v0
-; GFX6-NEXT:    s_mul_i32 s15, s18, s12
-; GFX6-NEXT:    v_readfirstlane_b32 s19, v2
-; GFX6-NEXT:    s_add_u32 s15, s19, s15
+; GFX6-NEXT:    s_add_i32 s15, s15, s16
+; GFX6-NEXT:    v_mov_b32_e32 v0, s15
+; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT:    s_addc_u32 s14, s14, s17
+; GFX6-NEXT:    s_mul_i32 s16, s12, s14
+; GFX6-NEXT:    s_mul_i32 s13, s13, s15
 ; GFX6-NEXT:    v_readfirstlane_b32 s17, v0
-; GFX6-NEXT:    s_mul_i32 s13, s16, s13
-; GFX6-NEXT:    s_addc_u32 s17, 0, s17
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v3
-; GFX6-NEXT:    s_add_u32 s13, s15, s13
-; GFX6-NEXT:    s_addc_u32 s13, s17, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
-; GFX6-NEXT:    s_addc_u32 s14, s14, 0
-; GFX6-NEXT:    s_mul_i32 s12, s16, s12
-; GFX6-NEXT:    s_add_u32 s12, s13, s12
-; GFX6-NEXT:    s_addc_u32 s14, 0, s14
-; GFX6-NEXT:    s_add_i32 s15, s18, s12
-; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX6-NEXT:    s_or_b32 s12, s12, s13
-; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_addc_u32 s14, s16, s14
+; GFX6-NEXT:    s_add_i32 s16, s17, s16
+; GFX6-NEXT:    s_mul_i32 s12, s12, s15
+; GFX6-NEXT:    s_add_i32 s13, s16, s13
+; GFX6-NEXT:    v_mov_b32_e32 v2, s12
+; GFX6-NEXT:    v_mov_b32_e32 v0, s13
+; GFX6-NEXT:    v_mul_hi_u32 v3, s14, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s15, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s14, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s15, v0
+; GFX6-NEXT:    s_mul_i32 s17, s15, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s19, v2
+; GFX6-NEXT:    s_add_u32 s17, s19, s17
+; GFX6-NEXT:    v_readfirstlane_b32 s18, v0
+; GFX6-NEXT:    s_mul_i32 s12, s14, s12
+; GFX6-NEXT:    s_addc_u32 s18, 0, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v3
+; GFX6-NEXT:    s_add_u32 s12, s17, s12
+; GFX6-NEXT:    s_addc_u32 s12, s18, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    s_addc_u32 s16, s16, 0
+; GFX6-NEXT:    s_mul_i32 s13, s14, s13
+; GFX6-NEXT:    s_add_u32 s12, s12, s13
+; GFX6-NEXT:    s_addc_u32 s13, 0, s16
+; GFX6-NEXT:    s_add_i32 s15, s15, s12
+; GFX6-NEXT:    s_addc_u32 s14, s14, s13
 ; GFX6-NEXT:    s_ashr_i32 s12, s9, 31
 ; GFX6-NEXT:    s_add_u32 s8, s8, s12
 ; GFX6-NEXT:    s_mov_b32 s13, s12
@@ -8422,38 +8399,34 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    s_mul_i32 s14, s6, s17
 ; GFX6-NEXT:    s_sub_i32 s8, s8, s14
 ; GFX6-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; GFX6-NEXT:    s_or_b32 s20, s14, s15
-; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
 ; GFX6-NEXT:    s_subb_u32 s19, s19, s7
-; GFX6-NEXT:    s_sub_i32 s21, s8, s6
-; GFX6-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GFX6-NEXT:    s_sub_i32 s20, s8, s6
+; GFX6-NEXT:    s_subb_u32 s19, s19, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s19, s7
+; GFX6-NEXT:    s_cselect_b32 s21, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s20, s6
+; GFX6-NEXT:    s_cselect_b32 s20, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s19, s7
+; GFX6-NEXT:    s_cselect_b32 s19, s20, s21
+; GFX6-NEXT:    s_add_u32 s20, s17, 1
+; GFX6-NEXT:    s_addc_u32 s21, s16, 0
+; GFX6-NEXT:    s_add_u32 s22, s17, 2
+; GFX6-NEXT:    s_addc_u32 s23, s16, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s19, 0
+; GFX6-NEXT:    s_cselect_b32 s19, s22, s20
+; GFX6-NEXT:    s_cselect_b32 s20, s23, s21
 ; GFX6-NEXT:    s_or_b32 s14, s14, s15
 ; GFX6-NEXT:    s_cmp_lg_u32 s14, 0
-; GFX6-NEXT:    s_subb_u32 s14, s19, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s14, s7
-; GFX6-NEXT:    s_cselect_b32 s15, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s21, s6
-; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s14, s7
-; GFX6-NEXT:    s_cselect_b32 s14, s19, s15
-; GFX6-NEXT:    s_add_u32 s15, s17, 1
-; GFX6-NEXT:    s_addc_u32 s19, s16, 0
-; GFX6-NEXT:    s_add_u32 s21, s17, 2
-; GFX6-NEXT:    s_addc_u32 s22, s16, 0
-; GFX6-NEXT:    s_cmp_lg_u32 s14, 0
-; GFX6-NEXT:    s_cselect_b32 s14, s21, s15
-; GFX6-NEXT:    s_cselect_b32 s15, s22, s19
-; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
 ; GFX6-NEXT:    s_subb_u32 s9, s9, s18
 ; GFX6-NEXT:    s_cmp_ge_u32 s9, s7
-; GFX6-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX6-NEXT:    s_cselect_b32 s14, -1, 0
 ; GFX6-NEXT:    s_cmp_ge_u32 s8, s6
 ; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
 ; GFX6-NEXT:    s_cmp_eq_u32 s9, s7
-; GFX6-NEXT:    s_cselect_b32 s6, s6, s18
+; GFX6-NEXT:    s_cselect_b32 s6, s6, s14
 ; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX6-NEXT:    s_cselect_b32 s7, s15, s16
-; GFX6-NEXT:    s_cselect_b32 s6, s14, s17
+; GFX6-NEXT:    s_cselect_b32 s7, s20, s16
+; GFX6-NEXT:    s_cselect_b32 s6, s19, s17
 ; GFX6-NEXT:    s_xor_b64 s[2:3], s[12:13], s[2:3]
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
 ; GFX6-NEXT:    s_sub_u32 s14, s6, s2
@@ -8465,8 +8438,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    s_xor_b64 s[8:9], s[0:1], s[6:7]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GFX6-NEXT:    s_sub_u32 s12, 0, s8
-; GFX6-NEXT:    s_subb_u32 s13, 0, s9
+; GFX6-NEXT:    s_sub_u32 s2, 0, s8
+; GFX6-NEXT:    s_subb_u32 s3, 0, s9
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -8475,99 +8448,93 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v0
-; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
-; GFX6-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX6-NEXT:    s_mul_i32 s1, s12, s16
-; GFX6-NEXT:    v_readfirstlane_b32 s3, v2
-; GFX6-NEXT:    s_mul_i32 s0, s13, s2
-; GFX6-NEXT:    s_add_i32 s1, s3, s1
-; GFX6-NEXT:    s_add_i32 s3, s1, s0
-; GFX6-NEXT:    s_mul_i32 s17, s12, s2
-; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s3
-; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s17
-; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
-; GFX6-NEXT:    s_mul_i32 s4, s2, s3
-; GFX6-NEXT:    v_readfirstlane_b32 s5, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s2, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    s_mul_i32 s13, s2, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v2
+; GFX6-NEXT:    s_mul_i32 s1, s3, s0
+; GFX6-NEXT:    s_add_i32 s13, s16, s13
+; GFX6-NEXT:    s_add_i32 s13, s13, s1
+; GFX6-NEXT:    s_mul_i32 s1, s2, s0
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s13
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s1
+; GFX6-NEXT:    s_mul_i32 s16, s0, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
 ; GFX6-NEXT:    v_readfirstlane_b32 s18, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, v1, s17
-; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s3
-; GFX6-NEXT:    s_add_u32 s4, s18, s4
-; GFX6-NEXT:    s_addc_u32 s5, 0, s5
-; GFX6-NEXT:    s_mul_i32 s17, s16, s17
+; GFX6-NEXT:    v_mul_hi_u32 v0, v1, s1
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s13
+; GFX6-NEXT:    s_add_u32 s16, s18, s16
+; GFX6-NEXT:    s_addc_u32 s17, 0, s17
+; GFX6-NEXT:    s_mul_i32 s1, s12, s1
 ; GFX6-NEXT:    v_readfirstlane_b32 s18, v0
-; GFX6-NEXT:    s_add_u32 s4, s4, s17
-; GFX6-NEXT:    s_addc_u32 s4, s5, s18
-; GFX6-NEXT:    v_readfirstlane_b32 s5, v1
-; GFX6-NEXT:    s_addc_u32 s5, s5, 0
-; GFX6-NEXT:    s_mul_i32 s3, s16, s3
-; GFX6-NEXT:    s_add_u32 s3, s4, s3
-; GFX6-NEXT:    s_addc_u32 s4, 0, s5
-; GFX6-NEXT:    s_add_i32 s5, s2, s3
-; GFX6-NEXT:    v_mov_b32_e32 v0, s5
-; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
-; GFX6-NEXT:    s_or_b32 s2, s2, s3
-; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX6-NEXT:    s_addc_u32 s4, s16, s4
-; GFX6-NEXT:    s_mul_i32 s2, s12, s4
-; GFX6-NEXT:    v_readfirstlane_b32 s3, v0
-; GFX6-NEXT:    s_add_i32 s2, s3, s2
-; GFX6-NEXT:    s_mul_i32 s13, s13, s5
-; GFX6-NEXT:    s_mul_i32 s3, s12, s5
-; GFX6-NEXT:    s_add_i32 s2, s2, s13
-; GFX6-NEXT:    v_mov_b32_e32 v2, s3
-; GFX6-NEXT:    v_mov_b32_e32 v0, s2
+; GFX6-NEXT:    s_add_u32 s1, s16, s1
+; GFX6-NEXT:    s_addc_u32 s1, s17, s18
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
+; GFX6-NEXT:    s_addc_u32 s16, s16, 0
+; GFX6-NEXT:    s_mul_i32 s13, s12, s13
+; GFX6-NEXT:    s_add_u32 s1, s1, s13
+; GFX6-NEXT:    s_addc_u32 s13, 0, s16
+; GFX6-NEXT:    s_add_i32 s16, s0, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s16
+; GFX6-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GFX6-NEXT:    s_addc_u32 s4, s12, s13
+; GFX6-NEXT:    s_mul_i32 s5, s2, s4
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v0
+; GFX6-NEXT:    s_add_i32 s5, s12, s5
+; GFX6-NEXT:    s_mul_i32 s3, s3, s16
+; GFX6-NEXT:    s_mul_i32 s2, s2, s16
+; GFX6-NEXT:    s_add_i32 s3, s5, s3
+; GFX6-NEXT:    v_mov_b32_e32 v2, s2
+; GFX6-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s5, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s16, v2
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s4, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
-; GFX6-NEXT:    s_mul_i32 s13, s5, s2
+; GFX6-NEXT:    v_mul_hi_u32 v0, s16, v0
+; GFX6-NEXT:    s_mul_i32 s12, s16, s3
 ; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
-; GFX6-NEXT:    s_add_u32 s13, s17, s13
-; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
-; GFX6-NEXT:    s_mul_i32 s3, s4, s3
-; GFX6-NEXT:    s_addc_u32 s16, 0, s16
-; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
-; GFX6-NEXT:    s_add_u32 s3, s13, s3
-; GFX6-NEXT:    s_addc_u32 s3, s16, s12
-; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
-; GFX6-NEXT:    s_addc_u32 s12, s12, 0
+; GFX6-NEXT:    s_add_u32 s12, s17, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
 ; GFX6-NEXT:    s_mul_i32 s2, s4, s2
-; GFX6-NEXT:    s_add_u32 s2, s3, s2
-; GFX6-NEXT:    s_addc_u32 s12, 0, s12
-; GFX6-NEXT:    s_add_i32 s13, s5, s2
-; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; GFX6-NEXT:    s_or_b32 s2, s2, s3
-; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX6-NEXT:    s_addc_u32 s12, s4, s12
+; GFX6-NEXT:    s_addc_u32 s13, 0, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX6-NEXT:    s_add_u32 s2, s12, s2
+; GFX6-NEXT:    s_addc_u32 s2, s13, s5
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX6-NEXT:    s_addc_u32 s5, s5, 0
+; GFX6-NEXT:    s_mul_i32 s3, s4, s3
+; GFX6-NEXT:    s_add_u32 s2, s2, s3
+; GFX6-NEXT:    s_addc_u32 s3, 0, s5
+; GFX6-NEXT:    s_add_i32 s12, s16, s2
+; GFX6-NEXT:    s_addc_u32 s13, s4, s3
 ; GFX6-NEXT:    s_ashr_i32 s4, s11, 31
 ; GFX6-NEXT:    s_add_u32 s2, s10, s4
 ; GFX6-NEXT:    s_mov_b32 s5, s4
 ; GFX6-NEXT:    s_addc_u32 s3, s11, s4
 ; GFX6-NEXT:    s_xor_b64 s[10:11], s[2:3], s[4:5]
-; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mov_b32_e32 v0, s13
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s10, v0
-; GFX6-NEXT:    v_mov_b32_e32 v2, s13
+; GFX6-NEXT:    v_mov_b32_e32 v2, s12
 ; GFX6-NEXT:    v_mul_hi_u32 v3, s10, v2
-; GFX6-NEXT:    s_mul_i32 s2, s10, s12
+; GFX6-NEXT:    s_mul_i32 s2, s10, s13
 ; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s11, v2
 ; GFX6-NEXT:    v_readfirstlane_b32 s17, v3
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s11, v0
 ; GFX6-NEXT:    s_add_u32 s2, s17, s2
 ; GFX6-NEXT:    s_addc_u32 s16, 0, s16
-; GFX6-NEXT:    s_mul_i32 s13, s11, s13
+; GFX6-NEXT:    s_mul_i32 s12, s11, s12
 ; GFX6-NEXT:    v_readfirstlane_b32 s17, v1
-; GFX6-NEXT:    s_add_u32 s2, s2, s13
+; GFX6-NEXT:    s_add_u32 s2, s2, s12
 ; GFX6-NEXT:    s_addc_u32 s2, s16, s17
-; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
-; GFX6-NEXT:    s_addc_u32 s13, s13, 0
-; GFX6-NEXT:    s_mul_i32 s12, s11, s12
-; GFX6-NEXT:    s_add_u32 s16, s2, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v0
+; GFX6-NEXT:    s_addc_u32 s12, s12, 0
+; GFX6-NEXT:    s_mul_i32 s13, s11, s13
+; GFX6-NEXT:    s_add_u32 s16, s2, s13
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s16
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s8, v0
-; GFX6-NEXT:    s_addc_u32 s17, 0, s13
+; GFX6-NEXT:    s_addc_u32 s17, 0, s12
 ; GFX6-NEXT:    s_mul_i32 s12, s8, s17
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
@@ -8578,38 +8545,34 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    s_mul_i32 s12, s8, s16
 ; GFX6-NEXT:    s_sub_i32 s10, s10, s12
 ; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX6-NEXT:    s_or_b32 s20, s12, s13
-; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
 ; GFX6-NEXT:    s_subb_u32 s19, s19, s9
-; GFX6-NEXT:    s_sub_i32 s21, s10, s8
-; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_sub_i32 s20, s10, s8
+; GFX6-NEXT:    s_subb_u32 s19, s19, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s19, s9
+; GFX6-NEXT:    s_cselect_b32 s21, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s20, s8
+; GFX6-NEXT:    s_cselect_b32 s20, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s19, s9
+; GFX6-NEXT:    s_cselect_b32 s19, s20, s21
+; GFX6-NEXT:    s_add_u32 s20, s16, 1
+; GFX6-NEXT:    s_addc_u32 s21, s17, 0
+; GFX6-NEXT:    s_add_u32 s22, s16, 2
+; GFX6-NEXT:    s_addc_u32 s23, s17, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s19, 0
+; GFX6-NEXT:    s_cselect_b32 s19, s22, s20
+; GFX6-NEXT:    s_cselect_b32 s20, s23, s21
 ; GFX6-NEXT:    s_or_b32 s12, s12, s13
 ; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_subb_u32 s12, s19, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s12, s9
-; GFX6-NEXT:    s_cselect_b32 s13, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s21, s8
-; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s12, s9
-; GFX6-NEXT:    s_cselect_b32 s12, s19, s13
-; GFX6-NEXT:    s_add_u32 s13, s16, 1
-; GFX6-NEXT:    s_addc_u32 s19, s17, 0
-; GFX6-NEXT:    s_add_u32 s21, s16, 2
-; GFX6-NEXT:    s_addc_u32 s22, s17, 0
-; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_cselect_b32 s12, s21, s13
-; GFX6-NEXT:    s_cselect_b32 s13, s22, s19
-; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
 ; GFX6-NEXT:    s_subb_u32 s11, s11, s18
 ; GFX6-NEXT:    s_cmp_ge_u32 s11, s9
-; GFX6-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX6-NEXT:    s_cselect_b32 s12, -1, 0
 ; GFX6-NEXT:    s_cmp_ge_u32 s10, s8
 ; GFX6-NEXT:    s_cselect_b32 s8, -1, 0
 ; GFX6-NEXT:    s_cmp_eq_u32 s11, s9
-; GFX6-NEXT:    s_cselect_b32 s8, s8, s18
+; GFX6-NEXT:    s_cselect_b32 s8, s8, s12
 ; GFX6-NEXT:    s_cmp_lg_u32 s8, 0
-; GFX6-NEXT:    s_cselect_b32 s9, s13, s17
-; GFX6-NEXT:    s_cselect_b32 s8, s12, s16
+; GFX6-NEXT:    s_cselect_b32 s9, s20, s17
+; GFX6-NEXT:    s_cselect_b32 s8, s19, s16
 ; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[8:9], s[4:5]
 ; GFX6-NEXT:    s_sub_u32 s4, s6, s4
@@ -8636,8 +8599,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
-; GFX9-NEXT:    s_sub_u32 s14, 0, s6
-; GFX9-NEXT:    s_subb_u32 s15, 0, s7
+; GFX9-NEXT:    s_sub_u32 s12, 0, s6
+; GFX9-NEXT:    s_subb_u32 s13, 0, s7
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -8646,105 +8609,98 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s16, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s12, v0
-; GFX9-NEXT:    s_mul_i32 s13, s14, s16
-; GFX9-NEXT:    s_mul_hi_u32 s18, s14, s12
-; GFX9-NEXT:    s_mul_i32 s17, s15, s12
-; GFX9-NEXT:    s_add_i32 s13, s18, s13
-; GFX9-NEXT:    s_mul_i32 s19, s14, s12
-; GFX9-NEXT:    s_add_i32 s13, s13, s17
-; GFX9-NEXT:    s_mul_hi_u32 s18, s12, s19
-; GFX9-NEXT:    s_mul_i32 s20, s12, s13
-; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s13
+; GFX9-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s15, v0
+; GFX9-NEXT:    s_mul_i32 s16, s12, s14
+; GFX9-NEXT:    s_mul_hi_u32 s18, s12, s15
+; GFX9-NEXT:    s_mul_i32 s17, s13, s15
+; GFX9-NEXT:    s_add_i32 s16, s18, s16
+; GFX9-NEXT:    s_mul_i32 s19, s12, s15
+; GFX9-NEXT:    s_add_i32 s16, s16, s17
+; GFX9-NEXT:    s_mul_hi_u32 s18, s15, s19
+; GFX9-NEXT:    s_mul_i32 s20, s15, s16
+; GFX9-NEXT:    s_mul_hi_u32 s17, s15, s16
 ; GFX9-NEXT:    s_add_u32 s18, s18, s20
 ; GFX9-NEXT:    s_addc_u32 s17, 0, s17
-; GFX9-NEXT:    s_mul_hi_u32 s20, s16, s19
-; GFX9-NEXT:    s_mul_i32 s19, s16, s19
+; GFX9-NEXT:    s_mul_hi_u32 s20, s14, s19
+; GFX9-NEXT:    s_mul_i32 s19, s14, s19
 ; GFX9-NEXT:    s_add_u32 s18, s18, s19
-; GFX9-NEXT:    s_mul_hi_u32 s21, s16, s13
+; GFX9-NEXT:    s_mul_hi_u32 s21, s14, s16
 ; GFX9-NEXT:    s_addc_u32 s17, s17, s20
 ; GFX9-NEXT:    s_addc_u32 s18, s21, 0
-; GFX9-NEXT:    s_mul_i32 s13, s16, s13
-; GFX9-NEXT:    s_add_u32 s13, s17, s13
+; GFX9-NEXT:    s_mul_i32 s16, s14, s16
+; GFX9-NEXT:    s_add_u32 s16, s17, s16
 ; GFX9-NEXT:    s_addc_u32 s17, 0, s18
-; GFX9-NEXT:    s_add_i32 s18, s12, s13
-; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT:    s_addc_u32 s16, s16, s17
-; GFX9-NEXT:    s_mul_i32 s12, s14, s16
-; GFX9-NEXT:    s_mul_hi_u32 s13, s14, s18
-; GFX9-NEXT:    s_add_i32 s12, s13, s12
-; GFX9-NEXT:    s_mul_i32 s15, s15, s18
-; GFX9-NEXT:    s_add_i32 s12, s12, s15
-; GFX9-NEXT:    s_mul_i32 s14, s14, s18
-; GFX9-NEXT:    s_mul_hi_u32 s15, s16, s14
-; GFX9-NEXT:    s_mul_i32 s17, s16, s14
-; GFX9-NEXT:    s_mul_i32 s20, s18, s12
-; GFX9-NEXT:    s_mul_hi_u32 s14, s18, s14
-; GFX9-NEXT:    s_mul_hi_u32 s19, s18, s12
-; GFX9-NEXT:    s_add_u32 s14, s14, s20
+; GFX9-NEXT:    s_add_i32 s15, s15, s16
+; GFX9-NEXT:    s_addc_u32 s14, s14, s17
+; GFX9-NEXT:    s_mul_i32 s16, s12, s14
+; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s15
+; GFX9-NEXT:    s_add_i32 s16, s17, s16
+; GFX9-NEXT:    s_mul_i32 s13, s13, s15
+; GFX9-NEXT:    s_add_i32 s16, s16, s13
+; GFX9-NEXT:    s_mul_i32 s12, s12, s15
+; GFX9-NEXT:    s_mul_hi_u32 s17, s14, s12
+; GFX9-NEXT:    s_mul_i32 s18, s14, s12
+; GFX9-NEXT:    s_mul_i32 s20, s15, s16
+; GFX9-NEXT:    s_mul_hi_u32 s12, s15, s12
+; GFX9-NEXT:    s_mul_hi_u32 s19, s15, s16
+; GFX9-NEXT:    s_add_u32 s12, s12, s20
 ; GFX9-NEXT:    s_addc_u32 s19, 0, s19
-; GFX9-NEXT:    s_add_u32 s14, s14, s17
-; GFX9-NEXT:    s_mul_hi_u32 s13, s16, s12
-; GFX9-NEXT:    s_addc_u32 s14, s19, s15
+; GFX9-NEXT:    s_add_u32 s12, s12, s18
+; GFX9-NEXT:    s_mul_hi_u32 s13, s14, s16
+; GFX9-NEXT:    s_addc_u32 s12, s19, s17
 ; GFX9-NEXT:    s_addc_u32 s13, s13, 0
-; GFX9-NEXT:    s_mul_i32 s12, s16, s12
-; GFX9-NEXT:    s_add_u32 s12, s14, s12
-; GFX9-NEXT:    s_addc_u32 s14, 0, s13
-; GFX9-NEXT:    s_add_i32 s18, s18, s12
-; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT:    s_addc_u32 s14, s16, s14
+; GFX9-NEXT:    s_mul_i32 s16, s14, s16
+; GFX9-NEXT:    s_add_u32 s12, s12, s16
+; GFX9-NEXT:    s_addc_u32 s13, 0, s13
+; GFX9-NEXT:    s_add_i32 s15, s15, s12
+; GFX9-NEXT:    s_addc_u32 s14, s14, s13
 ; GFX9-NEXT:    s_ashr_i32 s12, s9, 31
 ; GFX9-NEXT:    s_add_u32 s8, s8, s12
 ; GFX9-NEXT:    s_mov_b32 s13, s12
 ; GFX9-NEXT:    s_addc_u32 s9, s9, s12
 ; GFX9-NEXT:    s_xor_b64 s[8:9], s[8:9], s[12:13]
-; GFX9-NEXT:    s_mul_i32 s16, s8, s14
-; GFX9-NEXT:    s_mul_hi_u32 s17, s8, s18
-; GFX9-NEXT:    s_mul_hi_u32 s15, s8, s14
-; GFX9-NEXT:    s_add_u32 s16, s17, s16
-; GFX9-NEXT:    s_addc_u32 s15, 0, s15
-; GFX9-NEXT:    s_mul_hi_u32 s19, s9, s18
-; GFX9-NEXT:    s_mul_i32 s18, s9, s18
-; GFX9-NEXT:    s_add_u32 s16, s16, s18
-; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s14
-; GFX9-NEXT:    s_addc_u32 s15, s15, s19
-; GFX9-NEXT:    s_addc_u32 s16, s17, 0
+; GFX9-NEXT:    s_mul_i32 s17, s8, s14
+; GFX9-NEXT:    s_mul_hi_u32 s18, s8, s15
+; GFX9-NEXT:    s_mul_hi_u32 s16, s8, s14
+; GFX9-NEXT:    s_add_u32 s17, s18, s17
+; GFX9-NEXT:    s_addc_u32 s16, 0, s16
+; GFX9-NEXT:    s_mul_hi_u32 s19, s9, s15
+; GFX9-NEXT:    s_mul_i32 s15, s9, s15
+; GFX9-NEXT:    s_add_u32 s15, s17, s15
+; GFX9-NEXT:    s_mul_hi_u32 s18, s9, s14
+; GFX9-NEXT:    s_addc_u32 s15, s16, s19
+; GFX9-NEXT:    s_addc_u32 s16, s18, 0
 ; GFX9-NEXT:    s_mul_i32 s14, s9, s14
-; GFX9-NEXT:    s_add_u32 s18, s15, s14
-; GFX9-NEXT:    s_addc_u32 s19, 0, s16
-; GFX9-NEXT:    s_mul_i32 s14, s6, s19
-; GFX9-NEXT:    s_mul_hi_u32 s15, s6, s18
+; GFX9-NEXT:    s_add_u32 s17, s15, s14
+; GFX9-NEXT:    s_addc_u32 s16, 0, s16
+; GFX9-NEXT:    s_mul_i32 s14, s6, s16
+; GFX9-NEXT:    s_mul_hi_u32 s15, s6, s17
 ; GFX9-NEXT:    s_add_i32 s14, s15, s14
-; GFX9-NEXT:    s_mul_i32 s15, s7, s18
-; GFX9-NEXT:    s_add_i32 s20, s14, s15
-; GFX9-NEXT:    s_sub_i32 s16, s9, s20
-; GFX9-NEXT:    s_mul_i32 s14, s6, s18
+; GFX9-NEXT:    s_mul_i32 s15, s7, s17
+; GFX9-NEXT:    s_add_i32 s18, s14, s15
+; GFX9-NEXT:    s_sub_i32 s19, s9, s18
+; GFX9-NEXT:    s_mul_i32 s14, s6, s17
 ; GFX9-NEXT:    s_sub_i32 s8, s8, s14
 ; GFX9-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT:    s_subb_u32 s21, s16, s7
-; GFX9-NEXT:    s_sub_i32 s22, s8, s6
-; GFX9-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[16:17], 0
-; GFX9-NEXT:    s_subb_u32 s16, s21, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s16, s7
-; GFX9-NEXT:    s_cselect_b32 s17, -1, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s22, s6
+; GFX9-NEXT:    s_subb_u32 s19, s19, s7
+; GFX9-NEXT:    s_sub_i32 s20, s8, s6
+; GFX9-NEXT:    s_subb_u32 s19, s19, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s19, s7
 ; GFX9-NEXT:    s_cselect_b32 s21, -1, 0
-; GFX9-NEXT:    s_cmp_eq_u32 s16, s7
-; GFX9-NEXT:    s_cselect_b32 s16, s21, s17
-; GFX9-NEXT:    s_add_u32 s17, s18, 1
-; GFX9-NEXT:    s_addc_u32 s21, s19, 0
-; GFX9-NEXT:    s_add_u32 s22, s18, 2
-; GFX9-NEXT:    s_addc_u32 s23, s19, 0
-; GFX9-NEXT:    s_cmp_lg_u32 s16, 0
-; GFX9-NEXT:    s_cselect_b32 s16, s22, s17
-; GFX9-NEXT:    s_cselect_b32 s17, s23, s21
+; GFX9-NEXT:    s_cmp_ge_u32 s20, s6
+; GFX9-NEXT:    s_cselect_b32 s20, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s19, s7
+; GFX9-NEXT:    s_cselect_b32 s19, s20, s21
+; GFX9-NEXT:    s_add_u32 s20, s17, 1
+; GFX9-NEXT:    s_addc_u32 s21, s16, 0
+; GFX9-NEXT:    s_add_u32 s22, s17, 2
+; GFX9-NEXT:    s_addc_u32 s23, s16, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s19, 0
+; GFX9-NEXT:    s_cselect_b32 s19, s22, s20
+; GFX9-NEXT:    s_cselect_b32 s20, s23, s21
 ; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT:    s_subb_u32 s9, s9, s20
+; GFX9-NEXT:    s_subb_u32 s9, s9, s18
 ; GFX9-NEXT:    s_cmp_ge_u32 s9, s7
 ; GFX9-NEXT:    s_cselect_b32 s14, -1, 0
 ; GFX9-NEXT:    s_cmp_ge_u32 s8, s6
@@ -8752,12 +8708,12 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_cmp_eq_u32 s9, s7
 ; GFX9-NEXT:    s_cselect_b32 s6, s6, s14
 ; GFX9-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX9-NEXT:    s_cselect_b32 s7, s17, s19
-; GFX9-NEXT:    s_cselect_b32 s6, s16, s18
+; GFX9-NEXT:    s_cselect_b32 s7, s20, s16
+; GFX9-NEXT:    s_cselect_b32 s6, s19, s17
 ; GFX9-NEXT:    s_xor_b64 s[2:3], s[12:13], s[2:3]
 ; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
-; GFX9-NEXT:    s_sub_u32 s14, s6, s2
-; GFX9-NEXT:    s_subb_u32 s15, s7, s3
+; GFX9-NEXT:    s_sub_u32 s12, s6, s2
+; GFX9-NEXT:    s_subb_u32 s13, s7, s3
 ; GFX9-NEXT:    s_ashr_i32 s2, s1, 31
 ; GFX9-NEXT:    s_add_u32 s0, s0, s2
 ; GFX9-NEXT:    s_mov_b32 s3, s2
@@ -8766,8 +8722,8 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX9-NEXT:    s_sub_u32 s8, 0, s6
-; GFX9-NEXT:    s_subb_u32 s9, 0, s7
+; GFX9-NEXT:    s_sub_u32 s4, 0, s6
+; GFX9-NEXT:    s_subb_u32 s5, 0, s7
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
@@ -8777,105 +8733,98 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v2
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s13, v2
-; GFX9-NEXT:    s_mul_hi_u32 s12, s8, s4
-; GFX9-NEXT:    s_mul_i32 s16, s8, s13
-; GFX9-NEXT:    s_mul_i32 s5, s9, s4
-; GFX9-NEXT:    s_add_i32 s12, s12, s16
-; GFX9-NEXT:    s_add_i32 s12, s12, s5
-; GFX9-NEXT:    s_mul_i32 s17, s8, s4
-; GFX9-NEXT:    s_mul_i32 s16, s4, s12
-; GFX9-NEXT:    s_mul_hi_u32 s18, s4, s17
-; GFX9-NEXT:    s_mul_hi_u32 s5, s4, s12
+; GFX9-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s15, v2
+; GFX9-NEXT:    s_mul_hi_u32 s14, s4, s8
+; GFX9-NEXT:    s_mul_i32 s16, s4, s15
+; GFX9-NEXT:    s_mul_i32 s9, s5, s8
+; GFX9-NEXT:    s_add_i32 s14, s14, s16
+; GFX9-NEXT:    s_add_i32 s14, s14, s9
+; GFX9-NEXT:    s_mul_i32 s17, s4, s8
+; GFX9-NEXT:    s_mul_i32 s16, s8, s14
+; GFX9-NEXT:    s_mul_hi_u32 s18, s8, s17
+; GFX9-NEXT:    s_mul_hi_u32 s9, s8, s14
 ; GFX9-NEXT:    s_add_u32 s16, s18, s16
-; GFX9-NEXT:    s_addc_u32 s5, 0, s5
-; GFX9-NEXT:    s_mul_hi_u32 s19, s13, s17
-; GFX9-NEXT:    s_mul_i32 s17, s13, s17
+; GFX9-NEXT:    s_addc_u32 s9, 0, s9
+; GFX9-NEXT:    s_mul_hi_u32 s19, s15, s17
+; GFX9-NEXT:    s_mul_i32 s17, s15, s17
 ; GFX9-NEXT:    s_add_u32 s16, s16, s17
-; GFX9-NEXT:    s_mul_hi_u32 s18, s13, s12
-; GFX9-NEXT:    s_addc_u32 s5, s5, s19
+; GFX9-NEXT:    s_mul_hi_u32 s18, s15, s14
+; GFX9-NEXT:    s_addc_u32 s9, s9, s19
 ; GFX9-NEXT:    s_addc_u32 s16, s18, 0
-; GFX9-NEXT:    s_mul_i32 s12, s13, s12
-; GFX9-NEXT:    s_add_u32 s5, s5, s12
-; GFX9-NEXT:    s_addc_u32 s12, 0, s16
-; GFX9-NEXT:    s_add_i32 s16, s4, s5
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s12, s13, s12
-; GFX9-NEXT:    s_mul_i32 s4, s8, s12
-; GFX9-NEXT:    s_mul_hi_u32 s5, s8, s16
-; GFX9-NEXT:    s_add_i32 s4, s5, s4
-; GFX9-NEXT:    s_mul_i32 s9, s9, s16
-; GFX9-NEXT:    s_add_i32 s4, s4, s9
-; GFX9-NEXT:    s_mul_i32 s8, s8, s16
-; GFX9-NEXT:    s_mul_hi_u32 s9, s12, s8
-; GFX9-NEXT:    s_mul_i32 s13, s12, s8
-; GFX9-NEXT:    s_mul_i32 s18, s16, s4
-; GFX9-NEXT:    s_mul_hi_u32 s8, s16, s8
-; GFX9-NEXT:    s_mul_hi_u32 s17, s16, s4
-; GFX9-NEXT:    s_add_u32 s8, s8, s18
+; GFX9-NEXT:    s_mul_i32 s14, s15, s14
+; GFX9-NEXT:    s_add_u32 s9, s9, s14
+; GFX9-NEXT:    s_addc_u32 s14, 0, s16
+; GFX9-NEXT:    s_add_i32 s8, s8, s9
+; GFX9-NEXT:    s_addc_u32 s9, s15, s14
+; GFX9-NEXT:    s_mul_i32 s14, s4, s9
+; GFX9-NEXT:    s_mul_hi_u32 s15, s4, s8
+; GFX9-NEXT:    s_add_i32 s14, s15, s14
+; GFX9-NEXT:    s_mul_i32 s5, s5, s8
+; GFX9-NEXT:    s_add_i32 s14, s14, s5
+; GFX9-NEXT:    s_mul_i32 s4, s4, s8
+; GFX9-NEXT:    s_mul_hi_u32 s15, s9, s4
+; GFX9-NEXT:    s_mul_i32 s16, s9, s4
+; GFX9-NEXT:    s_mul_i32 s18, s8, s14
+; GFX9-NEXT:    s_mul_hi_u32 s4, s8, s4
+; GFX9-NEXT:    s_mul_hi_u32 s17, s8, s14
+; GFX9-NEXT:    s_add_u32 s4, s4, s18
 ; GFX9-NEXT:    s_addc_u32 s17, 0, s17
-; GFX9-NEXT:    s_add_u32 s8, s8, s13
-; GFX9-NEXT:    s_mul_hi_u32 s5, s12, s4
-; GFX9-NEXT:    s_addc_u32 s8, s17, s9
+; GFX9-NEXT:    s_add_u32 s4, s4, s16
+; GFX9-NEXT:    s_mul_hi_u32 s5, s9, s14
+; GFX9-NEXT:    s_addc_u32 s4, s17, s15
 ; GFX9-NEXT:    s_addc_u32 s5, s5, 0
-; GFX9-NEXT:    s_mul_i32 s4, s12, s4
-; GFX9-NEXT:    s_add_u32 s4, s8, s4
-; GFX9-NEXT:    s_addc_u32 s8, 0, s5
-; GFX9-NEXT:    s_add_i32 s16, s16, s4
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s12, s12, s8
+; GFX9-NEXT:    s_mul_i32 s14, s9, s14
+; GFX9-NEXT:    s_add_u32 s4, s4, s14
+; GFX9-NEXT:    s_addc_u32 s5, 0, s5
+; GFX9-NEXT:    s_add_i32 s14, s8, s4
+; GFX9-NEXT:    s_addc_u32 s15, s9, s5
 ; GFX9-NEXT:    s_ashr_i32 s4, s11, 31
 ; GFX9-NEXT:    s_add_u32 s8, s10, s4
 ; GFX9-NEXT:    s_mov_b32 s5, s4
 ; GFX9-NEXT:    s_addc_u32 s9, s11, s4
 ; GFX9-NEXT:    s_xor_b64 s[8:9], s[8:9], s[4:5]
-; GFX9-NEXT:    s_mul_i32 s11, s8, s12
-; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s16
-; GFX9-NEXT:    s_mul_hi_u32 s10, s8, s12
-; GFX9-NEXT:    s_add_u32 s11, s13, s11
+; GFX9-NEXT:    s_mul_i32 s11, s8, s15
+; GFX9-NEXT:    s_mul_hi_u32 s16, s8, s14
+; GFX9-NEXT:    s_mul_hi_u32 s10, s8, s15
+; GFX9-NEXT:    s_add_u32 s11, s16, s11
 ; GFX9-NEXT:    s_addc_u32 s10, 0, s10
-; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s16
-; GFX9-NEXT:    s_mul_i32 s16, s9, s16
-; GFX9-NEXT:    s_add_u32 s11, s11, s16
-; GFX9-NEXT:    s_mul_hi_u32 s13, s9, s12
+; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s14
+; GFX9-NEXT:    s_mul_i32 s14, s9, s14
+; GFX9-NEXT:    s_add_u32 s11, s11, s14
+; GFX9-NEXT:    s_mul_hi_u32 s16, s9, s15
 ; GFX9-NEXT:    s_addc_u32 s10, s10, s17
-; GFX9-NEXT:    s_addc_u32 s11, s13, 0
-; GFX9-NEXT:    s_mul_i32 s12, s9, s12
-; GFX9-NEXT:    s_add_u32 s16, s10, s12
-; GFX9-NEXT:    s_addc_u32 s17, 0, s11
-; GFX9-NEXT:    s_mul_i32 s10, s6, s17
-; GFX9-NEXT:    s_mul_hi_u32 s11, s6, s16
+; GFX9-NEXT:    s_addc_u32 s11, s16, 0
+; GFX9-NEXT:    s_mul_i32 s14, s9, s15
+; GFX9-NEXT:    s_add_u32 s14, s10, s14
+; GFX9-NEXT:    s_addc_u32 s15, 0, s11
+; GFX9-NEXT:    s_mul_i32 s10, s6, s15
+; GFX9-NEXT:    s_mul_hi_u32 s11, s6, s14
 ; GFX9-NEXT:    s_add_i32 s10, s11, s10
-; GFX9-NEXT:    s_mul_i32 s11, s7, s16
-; GFX9-NEXT:    s_add_i32 s18, s10, s11
-; GFX9-NEXT:    s_sub_i32 s12, s9, s18
-; GFX9-NEXT:    s_mul_i32 s10, s6, s16
+; GFX9-NEXT:    s_mul_i32 s11, s7, s14
+; GFX9-NEXT:    s_add_i32 s16, s10, s11
+; GFX9-NEXT:    s_sub_i32 s17, s9, s16
+; GFX9-NEXT:    s_mul_i32 s10, s6, s14
 ; GFX9-NEXT:    s_sub_i32 s8, s8, s10
 ; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s19, s12, s7
-; GFX9-NEXT:    s_sub_i32 s20, s8, s6
-; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GFX9-NEXT:    s_subb_u32 s12, s19, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s12, s7
-; GFX9-NEXT:    s_cselect_b32 s13, -1, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s20, s6
+; GFX9-NEXT:    s_subb_u32 s17, s17, s7
+; GFX9-NEXT:    s_sub_i32 s18, s8, s6
+; GFX9-NEXT:    s_subb_u32 s17, s17, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s17, s7
 ; GFX9-NEXT:    s_cselect_b32 s19, -1, 0
-; GFX9-NEXT:    s_cmp_eq_u32 s12, s7
-; GFX9-NEXT:    s_cselect_b32 s12, s19, s13
-; GFX9-NEXT:    s_add_u32 s13, s16, 1
-; GFX9-NEXT:    s_addc_u32 s19, s17, 0
-; GFX9-NEXT:    s_add_u32 s20, s16, 2
-; GFX9-NEXT:    s_addc_u32 s21, s17, 0
-; GFX9-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX9-NEXT:    s_cselect_b32 s12, s20, s13
-; GFX9-NEXT:    s_cselect_b32 s13, s21, s19
+; GFX9-NEXT:    s_cmp_ge_u32 s18, s6
+; GFX9-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s17, s7
+; GFX9-NEXT:    s_cselect_b32 s17, s18, s19
+; GFX9-NEXT:    s_add_u32 s18, s14, 1
+; GFX9-NEXT:    s_addc_u32 s19, s15, 0
+; GFX9-NEXT:    s_add_u32 s20, s14, 2
+; GFX9-NEXT:    s_addc_u32 s21, s15, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s17, 0
+; GFX9-NEXT:    s_cselect_b32 s17, s20, s18
+; GFX9-NEXT:    s_cselect_b32 s18, s21, s19
 ; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s9, s9, s18
+; GFX9-NEXT:    s_subb_u32 s9, s9, s16
 ; GFX9-NEXT:    s_cmp_ge_u32 s9, s7
 ; GFX9-NEXT:    s_cselect_b32 s10, -1, 0
 ; GFX9-NEXT:    s_cmp_ge_u32 s8, s6
@@ -8883,14 +8832,14 @@ define amdgpu_kernel void @sdiv_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_cmp_eq_u32 s9, s7
 ; GFX9-NEXT:    s_cselect_b32 s6, s6, s10
 ; GFX9-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX9-NEXT:    s_cselect_b32 s7, s13, s17
-; GFX9-NEXT:    s_cselect_b32 s6, s12, s16
+; GFX9-NEXT:    s_cselect_b32 s7, s18, s15
+; GFX9-NEXT:    s_cselect_b32 s6, s17, s14
 ; GFX9-NEXT:    s_xor_b64 s[2:3], s[4:5], s[2:3]
 ; GFX9-NEXT:    s_xor_b64 s[4:5], s[6:7], s[2:3]
 ; GFX9-NEXT:    s_sub_u32 s2, s4, s2
 ; GFX9-NEXT:    s_subb_u32 s3, s5, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s14
-; GFX9-NEXT:    v_mov_b32_e32 v2, s15
+; GFX9-NEXT:    v_mov_b32_e32 v1, s12
+; GFX9-NEXT:    v_mov_b32_e32 v2, s13
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v4, s3
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
@@ -9070,107 +9019,100 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    s_xor_b64 s[8:9], s[0:1], s[2:3]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GFX6-NEXT:    s_sub_u32 s10, 0, s8
-; GFX6-NEXT:    s_subb_u32 s11, 0, s9
+; GFX6-NEXT:    s_sub_u32 s0, 0, s8
+; GFX6-NEXT:    s_subb_u32 s1, 0, s9
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
-; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GFX6-NEXT:    v_trunc_f32_e32 v1, v1
 ; GFX6-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s10, v0
-; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
-; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX6-NEXT:    s_mul_i32 s1, s10, s12
-; GFX6-NEXT:    v_readfirstlane_b32 s15, v2
-; GFX6-NEXT:    s_mul_i32 s13, s11, s0
-; GFX6-NEXT:    s_mul_i32 s14, s10, s0
-; GFX6-NEXT:    s_add_i32 s1, s15, s1
-; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s14
-; GFX6-NEXT:    s_add_i32 s1, s1, s13
-; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s1
-; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s13, v3
-; GFX6-NEXT:    s_mul_i32 s15, s0, s1
-; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s1
-; GFX6-NEXT:    s_add_u32 s13, s13, s15
-; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
-; GFX6-NEXT:    s_addc_u32 s15, 0, s15
-; GFX6-NEXT:    s_mul_i32 s14, s12, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s16, v4
-; GFX6-NEXT:    s_add_u32 s13, s13, s14
-; GFX6-NEXT:    s_addc_u32 s13, s15, s16
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
-; GFX6-NEXT:    s_addc_u32 s14, s14, 0
-; GFX6-NEXT:    s_mul_i32 s1, s12, s1
-; GFX6-NEXT:    s_add_u32 s1, s13, s1
-; GFX6-NEXT:    s_addc_u32 s13, 0, s14
-; GFX6-NEXT:    s_add_i32 s14, s0, s1
-; GFX6-NEXT:    v_mov_b32_e32 v0, s14
-; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s10, v0
-; GFX6-NEXT:    s_or_b32 s0, s0, s1
-; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX6-NEXT:    s_addc_u32 s12, s12, s13
-; GFX6-NEXT:    s_mul_i32 s0, s10, s12
-; GFX6-NEXT:    v_readfirstlane_b32 s1, v0
-; GFX6-NEXT:    s_add_i32 s0, s1, s0
-; GFX6-NEXT:    s_mul_i32 s11, s11, s14
-; GFX6-NEXT:    s_mul_i32 s1, s10, s14
-; GFX6-NEXT:    s_add_i32 s0, s0, s11
-; GFX6-NEXT:    v_mov_b32_e32 v2, s1
-; GFX6-NEXT:    v_mov_b32_e32 v0, s0
-; GFX6-NEXT:    v_mul_hi_u32 v3, s12, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s14, v2
-; GFX6-NEXT:    v_mul_hi_u32 v1, s12, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s14, v0
-; GFX6-NEXT:    s_mul_i32 s11, s14, s0
-; GFX6-NEXT:    v_readfirstlane_b32 s15, v2
-; GFX6-NEXT:    s_add_u32 s11, s15, s11
-; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
-; GFX6-NEXT:    s_mul_i32 s1, s12, s1
-; GFX6-NEXT:    s_addc_u32 s13, 0, s13
-; GFX6-NEXT:    v_readfirstlane_b32 s10, v3
-; GFX6-NEXT:    s_add_u32 s1, s11, s1
-; GFX6-NEXT:    s_addc_u32 s1, s13, s10
+; GFX6-NEXT:    v_mul_hi_u32 v2, s0, v0
 ; GFX6-NEXT:    v_readfirstlane_b32 s10, v1
-; GFX6-NEXT:    s_addc_u32 s10, s10, 0
-; GFX6-NEXT:    s_mul_i32 s0, s12, s0
-; GFX6-NEXT:    s_add_u32 s0, s1, s0
-; GFX6-NEXT:    s_addc_u32 s10, 0, s10
-; GFX6-NEXT:    s_add_i32 s13, s14, s0
-; GFX6-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GFX6-NEXT:    s_or_b32 s0, s0, s1
-; GFX6-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX6-NEXT:    s_addc_u32 s12, s12, s10
+; GFX6-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX6-NEXT:    s_mul_i32 s11, s0, s10
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v2
+; GFX6-NEXT:    s_mul_i32 s12, s1, s2
+; GFX6-NEXT:    s_mul_i32 s13, s0, s2
+; GFX6-NEXT:    s_add_i32 s11, s14, s11
+; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s13
+; GFX6-NEXT:    s_add_i32 s11, s11, s12
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s11
+; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
+; GFX6-NEXT:    s_mul_i32 s14, s2, s11
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s11
+; GFX6-NEXT:    s_add_u32 s12, s12, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
+; GFX6-NEXT:    s_mul_i32 s13, s10, s13
+; GFX6-NEXT:    s_addc_u32 s14, 0, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v4
+; GFX6-NEXT:    s_add_u32 s12, s12, s13
+; GFX6-NEXT:    s_addc_u32 s12, s14, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v1
+; GFX6-NEXT:    s_addc_u32 s13, s13, 0
+; GFX6-NEXT:    s_mul_i32 s11, s10, s11
+; GFX6-NEXT:    s_add_u32 s11, s12, s11
+; GFX6-NEXT:    s_addc_u32 s12, 0, s13
+; GFX6-NEXT:    s_add_i32 s11, s2, s11
+; GFX6-NEXT:    v_mov_b32_e32 v0, s11
+; GFX6-NEXT:    v_mul_hi_u32 v0, s0, v0
+; GFX6-NEXT:    s_addc_u32 s10, s10, s12
+; GFX6-NEXT:    s_mul_i32 s12, s0, s10
+; GFX6-NEXT:    s_mul_i32 s1, s1, s11
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
+; GFX6-NEXT:    s_add_i32 s12, s13, s12
+; GFX6-NEXT:    s_mul_i32 s0, s0, s11
+; GFX6-NEXT:    s_add_i32 s1, s12, s1
+; GFX6-NEXT:    v_mov_b32_e32 v2, s0
+; GFX6-NEXT:    v_mov_b32_e32 v0, s1
+; GFX6-NEXT:    v_mul_hi_u32 v3, s10, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s11, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s10, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GFX6-NEXT:    s_mul_i32 s13, s11, s1
+; GFX6-NEXT:    v_readfirstlane_b32 s15, v2
+; GFX6-NEXT:    s_add_u32 s13, s15, s13
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
+; GFX6-NEXT:    s_mul_i32 s0, s10, s0
+; GFX6-NEXT:    s_addc_u32 s14, 0, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
+; GFX6-NEXT:    s_add_u32 s0, s13, s0
+; GFX6-NEXT:    s_addc_u32 s0, s14, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    s_addc_u32 s12, s12, 0
+; GFX6-NEXT:    s_mul_i32 s1, s10, s1
+; GFX6-NEXT:    s_add_u32 s0, s0, s1
+; GFX6-NEXT:    s_addc_u32 s1, 0, s12
+; GFX6-NEXT:    s_add_i32 s12, s11, s0
+; GFX6-NEXT:    s_addc_u32 s13, s10, s1
 ; GFX6-NEXT:    s_ashr_i32 s10, s7, 31
 ; GFX6-NEXT:    s_add_u32 s0, s6, s10
 ; GFX6-NEXT:    s_mov_b32 s11, s10
 ; GFX6-NEXT:    s_addc_u32 s1, s7, s10
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[10:11]
-; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mov_b32_e32 v0, s13
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s6, v0
-; GFX6-NEXT:    v_mov_b32_e32 v2, s13
+; GFX6-NEXT:    v_mov_b32_e32 v2, s12
 ; GFX6-NEXT:    v_mul_hi_u32 v3, s6, v2
 ; GFX6-NEXT:    s_mov_b32 s0, s4
 ; GFX6-NEXT:    v_readfirstlane_b32 s4, v1
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s7, v2
-; GFX6-NEXT:    s_mul_i32 s1, s6, s12
+; GFX6-NEXT:    s_mul_i32 s1, s6, s13
 ; GFX6-NEXT:    v_readfirstlane_b32 s14, v3
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s7, v0
 ; GFX6-NEXT:    s_add_u32 s1, s14, s1
 ; GFX6-NEXT:    s_addc_u32 s4, 0, s4
-; GFX6-NEXT:    s_mul_i32 s13, s7, s13
+; GFX6-NEXT:    s_mul_i32 s12, s7, s12
 ; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
-; GFX6-NEXT:    s_add_u32 s1, s1, s13
+; GFX6-NEXT:    s_add_u32 s1, s1, s12
 ; GFX6-NEXT:    s_addc_u32 s1, s4, s14
 ; GFX6-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX6-NEXT:    s_addc_u32 s4, s4, 0
-; GFX6-NEXT:    s_mul_i32 s12, s7, s12
+; GFX6-NEXT:    s_mul_i32 s12, s7, s13
 ; GFX6-NEXT:    s_add_u32 s12, s1, s12
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s12
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s8, v0
@@ -9180,50 +9122,46 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX6-NEXT:    v_readfirstlane_b32 s5, v0
 ; GFX6-NEXT:    s_add_i32 s4, s5, s4
 ; GFX6-NEXT:    s_mul_i32 s5, s9, s12
-; GFX6-NEXT:    s_add_i32 s13, s4, s5
-; GFX6-NEXT:    s_sub_i32 s14, s7, s13
+; GFX6-NEXT:    s_add_i32 s14, s4, s5
+; GFX6-NEXT:    s_sub_i32 s13, s7, s14
 ; GFX6-NEXT:    s_mul_i32 s4, s8, s12
 ; GFX6-NEXT:    s_sub_i32 s6, s6, s4
 ; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX6-NEXT:    s_or_b32 s12, s4, s5
+; GFX6-NEXT:    s_subb_u32 s15, s13, s9
+; GFX6-NEXT:    s_sub_i32 s16, s6, s8
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_subb_u32 s17, s15, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s17, s9
+; GFX6-NEXT:    s_cselect_b32 s18, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s16, s8
+; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s17, s9
+; GFX6-NEXT:    s_cselect_b32 s18, s19, s18
+; GFX6-NEXT:    s_or_b32 s12, s12, s13
 ; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_subb_u32 s14, s14, s9
-; GFX6-NEXT:    s_sub_i32 s15, s6, s8
-; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GFX6-NEXT:    s_subb_u32 s12, s15, s9
+; GFX6-NEXT:    s_sub_i32 s13, s16, s8
+; GFX6-NEXT:    s_subb_u32 s12, s12, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s18, 0
+; GFX6-NEXT:    s_cselect_b32 s13, s13, s16
+; GFX6-NEXT:    s_cselect_b32 s12, s12, s17
 ; GFX6-NEXT:    s_or_b32 s4, s4, s5
 ; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX6-NEXT:    s_subb_u32 s16, s14, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s16, s9
+; GFX6-NEXT:    s_subb_u32 s4, s7, s14
+; GFX6-NEXT:    s_cmp_ge_u32 s4, s9
 ; GFX6-NEXT:    s_cselect_b32 s5, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s15, s8
-; GFX6-NEXT:    s_cselect_b32 s17, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s16, s9
-; GFX6-NEXT:    s_cselect_b32 s17, s17, s5
-; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX6-NEXT:    s_subb_u32 s14, s14, s9
-; GFX6-NEXT:    s_sub_i32 s18, s15, s8
-; GFX6-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX6-NEXT:    s_or_b32 s4, s4, s5
-; GFX6-NEXT:    s_cmp_lg_u32 s4, 0
-; GFX6-NEXT:    s_subb_u32 s4, s14, 0
-; GFX6-NEXT:    s_cmp_lg_u32 s17, 0
-; GFX6-NEXT:    s_cselect_b32 s14, s18, s15
-; GFX6-NEXT:    s_cselect_b32 s4, s4, s16
-; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_subb_u32 s5, s7, s13
-; GFX6-NEXT:    s_cmp_ge_u32 s5, s9
-; GFX6-NEXT:    s_cselect_b32 s7, -1, 0
 ; GFX6-NEXT:    s_cmp_ge_u32 s6, s8
-; GFX6-NEXT:    s_cselect_b32 s8, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s5, s9
-; GFX6-NEXT:    s_cselect_b32 s7, s8, s7
-; GFX6-NEXT:    s_cmp_lg_u32 s7, 0
-; GFX6-NEXT:    s_cselect_b32 s5, s4, s5
-; GFX6-NEXT:    s_cselect_b32 s4, s14, s6
+; GFX6-NEXT:    s_cselect_b32 s7, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s4, s9
+; GFX6-NEXT:    s_cselect_b32 s5, s7, s5
+; GFX6-NEXT:    s_cmp_lg_u32 s5, 0
+; GFX6-NEXT:    s_cselect_b32 s5, s12, s4
+; GFX6-NEXT:    s_cselect_b32 s4, s13, s6
 ; GFX6-NEXT:    s_xor_b64 s[4:5], s[4:5], s[10:11]
 ; GFX6-NEXT:    s_sub_u32 s4, s4, s10
 ; GFX6-NEXT:    s_subb_u32 s5, s5, s10
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s4
+; GFX6-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX6-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
@@ -9241,8 +9179,8 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
 ; GFX9-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX9-NEXT:    s_sub_u32 s8, 0, s6
-; GFX9-NEXT:    s_subb_u32 s9, 0, s7
+; GFX9-NEXT:    s_sub_u32 s4, 0, s6
+; GFX9-NEXT:    s_subb_u32 s5, 0, s7
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
@@ -9252,73 +9190,69 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    v_madmk_f32 v1, v2, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s10, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
-; GFX9-NEXT:    s_mul_i32 s5, s8, s10
-; GFX9-NEXT:    s_mul_hi_u32 s12, s8, s4
-; GFX9-NEXT:    s_mul_i32 s11, s9, s4
-; GFX9-NEXT:    s_add_i32 s5, s12, s5
-; GFX9-NEXT:    s_mul_i32 s13, s8, s4
-; GFX9-NEXT:    s_add_i32 s5, s5, s11
-; GFX9-NEXT:    s_mul_hi_u32 s12, s4, s13
-; GFX9-NEXT:    s_mul_i32 s14, s4, s5
-; GFX9-NEXT:    s_mul_hi_u32 s11, s4, s5
+; GFX9-NEXT:    v_readfirstlane_b32 s8, v2
+; GFX9-NEXT:    v_readfirstlane_b32 s9, v1
+; GFX9-NEXT:    s_mul_i32 s10, s4, s8
+; GFX9-NEXT:    s_mul_hi_u32 s12, s4, s9
+; GFX9-NEXT:    s_mul_i32 s11, s5, s9
+; GFX9-NEXT:    s_add_i32 s10, s12, s10
+; GFX9-NEXT:    s_mul_i32 s13, s4, s9
+; GFX9-NEXT:    s_add_i32 s10, s10, s11
+; GFX9-NEXT:    s_mul_hi_u32 s12, s9, s13
+; GFX9-NEXT:    s_mul_i32 s14, s9, s10
+; GFX9-NEXT:    s_mul_hi_u32 s11, s9, s10
 ; GFX9-NEXT:    s_add_u32 s12, s12, s14
 ; GFX9-NEXT:    s_addc_u32 s11, 0, s11
-; GFX9-NEXT:    s_mul_hi_u32 s15, s10, s13
-; GFX9-NEXT:    s_mul_i32 s13, s10, s13
+; GFX9-NEXT:    s_mul_hi_u32 s15, s8, s13
+; GFX9-NEXT:    s_mul_i32 s13, s8, s13
 ; GFX9-NEXT:    s_add_u32 s12, s12, s13
-; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s5
+; GFX9-NEXT:    s_mul_hi_u32 s14, s8, s10
 ; GFX9-NEXT:    s_addc_u32 s11, s11, s15
 ; GFX9-NEXT:    s_addc_u32 s12, s14, 0
-; GFX9-NEXT:    s_mul_i32 s5, s10, s5
-; GFX9-NEXT:    s_add_u32 s5, s11, s5
+; GFX9-NEXT:    s_mul_i32 s10, s8, s10
+; GFX9-NEXT:    s_add_u32 s10, s11, s10
 ; GFX9-NEXT:    s_addc_u32 s11, 0, s12
-; GFX9-NEXT:    s_add_i32 s12, s4, s5
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s10, s10, s11
-; GFX9-NEXT:    s_mul_i32 s4, s8, s10
-; GFX9-NEXT:    s_mul_hi_u32 s5, s8, s12
-; GFX9-NEXT:    s_add_i32 s4, s5, s4
-; GFX9-NEXT:    s_mul_i32 s9, s9, s12
-; GFX9-NEXT:    s_add_i32 s4, s4, s9
-; GFX9-NEXT:    s_mul_i32 s8, s8, s12
-; GFX9-NEXT:    s_mul_hi_u32 s9, s10, s8
-; GFX9-NEXT:    s_mul_i32 s11, s10, s8
-; GFX9-NEXT:    s_mul_i32 s14, s12, s4
-; GFX9-NEXT:    s_mul_hi_u32 s8, s12, s8
-; GFX9-NEXT:    s_mul_hi_u32 s13, s12, s4
-; GFX9-NEXT:    s_add_u32 s8, s8, s14
+; GFX9-NEXT:    s_add_i32 s9, s9, s10
+; GFX9-NEXT:    s_addc_u32 s8, s8, s11
+; GFX9-NEXT:    s_mul_i32 s10, s4, s8
+; GFX9-NEXT:    s_mul_hi_u32 s11, s4, s9
+; GFX9-NEXT:    s_add_i32 s10, s11, s10
+; GFX9-NEXT:    s_mul_i32 s5, s5, s9
+; GFX9-NEXT:    s_add_i32 s10, s10, s5
+; GFX9-NEXT:    s_mul_i32 s4, s4, s9
+; GFX9-NEXT:    s_mul_hi_u32 s11, s8, s4
+; GFX9-NEXT:    s_mul_i32 s12, s8, s4
+; GFX9-NEXT:    s_mul_i32 s14, s9, s10
+; GFX9-NEXT:    s_mul_hi_u32 s4, s9, s4
+; GFX9-NEXT:    s_mul_hi_u32 s13, s9, s10
+; GFX9-NEXT:    s_add_u32 s4, s4, s14
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s13
-; GFX9-NEXT:    s_add_u32 s8, s8, s11
-; GFX9-NEXT:    s_mul_hi_u32 s5, s10, s4
-; GFX9-NEXT:    s_addc_u32 s8, s13, s9
+; GFX9-NEXT:    s_add_u32 s4, s4, s12
+; GFX9-NEXT:    s_mul_hi_u32 s5, s8, s10
+; GFX9-NEXT:    s_addc_u32 s4, s13, s11
 ; GFX9-NEXT:    s_addc_u32 s5, s5, 0
-; GFX9-NEXT:    s_mul_i32 s4, s10, s4
-; GFX9-NEXT:    s_add_u32 s4, s8, s4
-; GFX9-NEXT:    s_addc_u32 s8, 0, s5
-; GFX9-NEXT:    s_add_i32 s12, s12, s4
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s8, s10, s8
+; GFX9-NEXT:    s_mul_i32 s10, s8, s10
+; GFX9-NEXT:    s_add_u32 s4, s4, s10
+; GFX9-NEXT:    s_addc_u32 s5, 0, s5
+; GFX9-NEXT:    s_add_i32 s9, s9, s4
+; GFX9-NEXT:    s_addc_u32 s8, s8, s5
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_ashr_i32 s4, s3, 31
 ; GFX9-NEXT:    s_add_u32 s2, s2, s4
 ; GFX9-NEXT:    s_mov_b32 s5, s4
 ; GFX9-NEXT:    s_addc_u32 s3, s3, s4
 ; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
-; GFX9-NEXT:    s_mul_i32 s10, s2, s8
-; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s12
-; GFX9-NEXT:    s_mul_hi_u32 s9, s2, s8
-; GFX9-NEXT:    s_add_u32 s10, s11, s10
-; GFX9-NEXT:    s_addc_u32 s9, 0, s9
-; GFX9-NEXT:    s_mul_hi_u32 s13, s3, s12
-; GFX9-NEXT:    s_mul_i32 s12, s3, s12
-; GFX9-NEXT:    s_add_u32 s10, s10, s12
-; GFX9-NEXT:    s_mul_hi_u32 s11, s3, s8
-; GFX9-NEXT:    s_addc_u32 s9, s9, s13
-; GFX9-NEXT:    s_addc_u32 s10, s11, 0
+; GFX9-NEXT:    s_mul_i32 s11, s2, s8
+; GFX9-NEXT:    s_mul_hi_u32 s12, s2, s9
+; GFX9-NEXT:    s_mul_hi_u32 s10, s2, s8
+; GFX9-NEXT:    s_add_u32 s11, s12, s11
+; GFX9-NEXT:    s_addc_u32 s10, 0, s10
+; GFX9-NEXT:    s_mul_hi_u32 s13, s3, s9
+; GFX9-NEXT:    s_mul_i32 s9, s3, s9
+; GFX9-NEXT:    s_add_u32 s9, s11, s9
+; GFX9-NEXT:    s_mul_hi_u32 s12, s3, s8
+; GFX9-NEXT:    s_addc_u32 s9, s10, s13
+; GFX9-NEXT:    s_addc_u32 s10, s12, 0
 ; GFX9-NEXT:    s_mul_i32 s8, s3, s8
 ; GFX9-NEXT:    s_add_u32 s8, s9, s8
 ; GFX9-NEXT:    s_addc_u32 s9, 0, s10
@@ -9331,11 +9265,9 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    s_mul_i32 s8, s6, s8
 ; GFX9-NEXT:    s_sub_i32 s2, s2, s8
 ; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
 ; GFX9-NEXT:    s_subb_u32 s13, s10, s7
 ; GFX9-NEXT:    s_sub_i32 s14, s2, s6
 ; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
 ; GFX9-NEXT:    s_subb_u32 s15, s13, 0
 ; GFX9-NEXT:    s_cmp_ge_u32 s15, s7
 ; GFX9-NEXT:    s_cselect_b32 s16, -1, 0
@@ -9344,13 +9276,11 @@ define amdgpu_kernel void @srem_i64_pow2_shl_denom(ptr addrspace(1) %out, i64 %x
 ; GFX9-NEXT:    s_cmp_eq_u32 s15, s7
 ; GFX9-NEXT:    s_cselect_b32 s16, s17, s16
 ; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s13, s13, s7
-; GFX9-NEXT:    s_sub_i32 s17, s14, s6
-; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s10, s13, 0
+; GFX9-NEXT:    s_subb_u32 s10, s13, s7
+; GFX9-NEXT:    s_sub_i32 s11, s14, s6
+; GFX9-NEXT:    s_subb_u32 s10, s10, 0
 ; GFX9-NEXT:    s_cmp_lg_u32 s16, 0
-; GFX9-NEXT:    s_cselect_b32 s11, s17, s14
+; GFX9-NEXT:    s_cselect_b32 s11, s11, s14
 ; GFX9-NEXT:    s_cselect_b32 s10, s10, s15
 ; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
 ; GFX9-NEXT:    s_subb_u32 s3, s3, s12
@@ -9473,8 +9403,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX6-NEXT:    s_sub_u32 s12, 0, s2
-; GFX6-NEXT:    s_subb_u32 s13, 0, s3
+; GFX6-NEXT:    s_sub_u32 s6, 0, s2
+; GFX6-NEXT:    s_subb_u32 s7, 0, s3
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -9483,71 +9413,65 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v0
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
-; GFX6-NEXT:    v_readfirstlane_b32 s6, v0
-; GFX6-NEXT:    s_mul_i32 s7, s12, s14
+; GFX6-NEXT:    v_mul_hi_u32 v2, s6, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v0
+; GFX6-NEXT:    s_mul_i32 s14, s6, s12
 ; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
-; GFX6-NEXT:    s_mul_i32 s15, s13, s6
-; GFX6-NEXT:    s_mul_i32 s16, s12, s6
-; GFX6-NEXT:    s_add_i32 s7, s17, s7
+; GFX6-NEXT:    s_mul_i32 s15, s7, s13
+; GFX6-NEXT:    s_mul_i32 s16, s6, s13
+; GFX6-NEXT:    s_add_i32 s14, s17, s14
 ; GFX6-NEXT:    v_mul_hi_u32 v3, v0, s16
-; GFX6-NEXT:    s_add_i32 s7, s7, s15
-; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s7
+; GFX6-NEXT:    s_add_i32 s14, s14, s15
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s14
 ; GFX6-NEXT:    v_mul_hi_u32 v4, v1, s16
 ; GFX6-NEXT:    v_readfirstlane_b32 s15, v3
-; GFX6-NEXT:    s_mul_i32 s18, s6, s7
-; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s7
+; GFX6-NEXT:    s_mul_i32 s18, s13, s14
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s14
 ; GFX6-NEXT:    s_add_u32 s15, s15, s18
 ; GFX6-NEXT:    v_readfirstlane_b32 s18, v0
-; GFX6-NEXT:    s_mul_i32 s16, s14, s16
+; GFX6-NEXT:    s_mul_i32 s16, s12, s16
 ; GFX6-NEXT:    s_addc_u32 s18, 0, s18
 ; GFX6-NEXT:    v_readfirstlane_b32 s17, v4
 ; GFX6-NEXT:    s_add_u32 s15, s15, s16
 ; GFX6-NEXT:    s_addc_u32 s15, s18, s17
 ; GFX6-NEXT:    v_readfirstlane_b32 s16, v1
 ; GFX6-NEXT:    s_addc_u32 s16, s16, 0
-; GFX6-NEXT:    s_mul_i32 s7, s14, s7
-; GFX6-NEXT:    s_add_u32 s7, s15, s7
+; GFX6-NEXT:    s_mul_i32 s14, s12, s14
+; GFX6-NEXT:    s_add_u32 s14, s15, s14
 ; GFX6-NEXT:    s_addc_u32 s15, 0, s16
-; GFX6-NEXT:    s_add_i32 s16, s6, s7
-; GFX6-NEXT:    v_mov_b32_e32 v0, s16
-; GFX6-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
-; GFX6-NEXT:    s_or_b32 s6, s6, s7
-; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX6-NEXT:    s_addc_u32 s14, s14, s15
-; GFX6-NEXT:    s_mul_i32 s6, s12, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s7, v0
-; GFX6-NEXT:    s_add_i32 s6, s7, s6
-; GFX6-NEXT:    s_mul_i32 s13, s13, s16
-; GFX6-NEXT:    s_mul_i32 s7, s12, s16
-; GFX6-NEXT:    s_add_i32 s6, s6, s13
-; GFX6-NEXT:    v_mov_b32_e32 v2, s7
-; GFX6-NEXT:    v_mov_b32_e32 v0, s6
-; GFX6-NEXT:    v_mul_hi_u32 v3, s14, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s16, v2
-; GFX6-NEXT:    v_mul_hi_u32 v1, s14, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s16, v0
-; GFX6-NEXT:    s_mul_i32 s13, s16, s6
-; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
-; GFX6-NEXT:    s_add_u32 s13, s17, s13
+; GFX6-NEXT:    s_add_i32 s13, s13, s14
+; GFX6-NEXT:    v_mov_b32_e32 v0, s13
+; GFX6-NEXT:    v_mul_hi_u32 v0, s6, v0
+; GFX6-NEXT:    s_addc_u32 s12, s12, s15
+; GFX6-NEXT:    s_mul_i32 s14, s6, s12
+; GFX6-NEXT:    s_mul_i32 s7, s7, s13
 ; GFX6-NEXT:    v_readfirstlane_b32 s15, v0
-; GFX6-NEXT:    s_mul_i32 s7, s14, s7
-; GFX6-NEXT:    s_addc_u32 s15, 0, s15
-; GFX6-NEXT:    v_readfirstlane_b32 s12, v3
-; GFX6-NEXT:    s_add_u32 s7, s13, s7
-; GFX6-NEXT:    s_addc_u32 s7, s15, s12
-; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
-; GFX6-NEXT:    s_addc_u32 s12, s12, 0
-; GFX6-NEXT:    s_mul_i32 s6, s14, s6
-; GFX6-NEXT:    s_add_u32 s6, s7, s6
-; GFX6-NEXT:    s_addc_u32 s12, 0, s12
-; GFX6-NEXT:    s_add_i32 s13, s16, s6
-; GFX6-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GFX6-NEXT:    s_or_b32 s6, s6, s7
-; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX6-NEXT:    s_addc_u32 s12, s14, s12
+; GFX6-NEXT:    s_add_i32 s14, s15, s14
+; GFX6-NEXT:    s_mul_i32 s6, s6, s13
+; GFX6-NEXT:    s_add_i32 s7, s14, s7
+; GFX6-NEXT:    v_mov_b32_e32 v2, s6
+; GFX6-NEXT:    v_mov_b32_e32 v0, s7
+; GFX6-NEXT:    v_mul_hi_u32 v3, s12, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s13, v2
+; GFX6-NEXT:    v_mul_hi_u32 v1, s12, v0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s13, v0
+; GFX6-NEXT:    s_mul_i32 s15, s13, s7
+; GFX6-NEXT:    v_readfirstlane_b32 s17, v2
+; GFX6-NEXT:    s_add_u32 s15, s17, s15
+; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
+; GFX6-NEXT:    s_mul_i32 s6, s12, s6
+; GFX6-NEXT:    s_addc_u32 s16, 0, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v3
+; GFX6-NEXT:    s_add_u32 s6, s15, s6
+; GFX6-NEXT:    s_addc_u32 s6, s16, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    s_addc_u32 s14, s14, 0
+; GFX6-NEXT:    s_mul_i32 s7, s12, s7
+; GFX6-NEXT:    s_add_u32 s6, s6, s7
+; GFX6-NEXT:    s_addc_u32 s7, 0, s14
+; GFX6-NEXT:    s_add_i32 s13, s13, s6
+; GFX6-NEXT:    s_addc_u32 s12, s12, s7
 ; GFX6-NEXT:    s_ashr_i32 s6, s9, 31
 ; GFX6-NEXT:    s_add_u32 s8, s8, s6
 ; GFX6-NEXT:    s_mov_b32 s7, s6
@@ -9579,49 +9503,44 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
 ; GFX6-NEXT:    s_add_i32 s13, s14, s13
 ; GFX6-NEXT:    s_mul_i32 s14, s3, s12
-; GFX6-NEXT:    s_add_i32 s14, s13, s14
-; GFX6-NEXT:    s_sub_i32 s15, s9, s14
+; GFX6-NEXT:    s_add_i32 s16, s13, s14
+; GFX6-NEXT:    s_sub_i32 s14, s9, s16
 ; GFX6-NEXT:    s_mul_i32 s12, s2, s12
 ; GFX6-NEXT:    s_sub_i32 s8, s8, s12
 ; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX6-NEXT:    s_or_b32 s16, s12, s13
-; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
-; GFX6-NEXT:    s_subb_u32 s15, s15, s3
-; GFX6-NEXT:    s_sub_i32 s17, s8, s2
-; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX6-NEXT:    s_or_b32 s12, s12, s13
-; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_subb_u32 s18, s15, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s18, s3
-; GFX6-NEXT:    s_cselect_b32 s13, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s17, s2
-; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s18, s3
-; GFX6-NEXT:    s_cselect_b32 s19, s19, s13
-; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_subb_u32 s15, s15, s3
-; GFX6-NEXT:    s_sub_i32 s20, s17, s2
-; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_subb_u32 s17, s14, s3
+; GFX6-NEXT:    s_sub_i32 s18, s8, s2
+; GFX6-NEXT:    s_cselect_b64 s[14:15], 1, 0
+; GFX6-NEXT:    s_subb_u32 s19, s17, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s19, s3
+; GFX6-NEXT:    s_cselect_b32 s20, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s18, s2
+; GFX6-NEXT:    s_cselect_b32 s21, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s19, s3
+; GFX6-NEXT:    s_cselect_b32 s20, s21, s20
+; GFX6-NEXT:    s_or_b32 s14, s14, s15
+; GFX6-NEXT:    s_cmp_lg_u32 s14, 0
+; GFX6-NEXT:    s_subb_u32 s14, s17, s3
+; GFX6-NEXT:    s_sub_i32 s15, s18, s2
+; GFX6-NEXT:    s_subb_u32 s14, s14, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
+; GFX6-NEXT:    s_cselect_b32 s15, s15, s18
+; GFX6-NEXT:    s_cselect_b32 s14, s14, s19
 ; GFX6-NEXT:    s_or_b32 s12, s12, s13
 ; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
-; GFX6-NEXT:    s_subb_u32 s12, s15, 0
-; GFX6-NEXT:    s_cmp_lg_u32 s19, 0
-; GFX6-NEXT:    s_cselect_b32 s13, s20, s17
-; GFX6-NEXT:    s_cselect_b32 s12, s12, s18
-; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
-; GFX6-NEXT:    s_subb_u32 s9, s9, s14
+; GFX6-NEXT:    s_subb_u32 s9, s9, s16
 ; GFX6-NEXT:    s_cmp_ge_u32 s9, s3
-; GFX6-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX6-NEXT:    s_cselect_b32 s12, -1, 0
 ; GFX6-NEXT:    s_cmp_ge_u32 s8, s2
 ; GFX6-NEXT:    s_cselect_b32 s2, -1, 0
 ; GFX6-NEXT:    s_cmp_eq_u32 s9, s3
-; GFX6-NEXT:    s_cselect_b32 s2, s2, s14
+; GFX6-NEXT:    s_cselect_b32 s2, s2, s12
 ; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX6-NEXT:    s_cselect_b32 s3, s12, s9
-; GFX6-NEXT:    s_cselect_b32 s2, s13, s8
+; GFX6-NEXT:    s_cselect_b32 s3, s14, s9
+; GFX6-NEXT:    s_cselect_b32 s2, s15, s8
 ; GFX6-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
-; GFX6-NEXT:    s_sub_u32 s12, s2, s6
-; GFX6-NEXT:    s_subb_u32 s13, s3, s6
+; GFX6-NEXT:    s_sub_u32 s14, s2, s6
+; GFX6-NEXT:    s_subb_u32 s15, s3, s6
 ; GFX6-NEXT:    s_ashr_i32 s2, s1, 31
 ; GFX6-NEXT:    s_add_u32 s0, s0, s2
 ; GFX6-NEXT:    s_mov_b32 s3, s2
@@ -9629,8 +9548,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[0:1], s[2:3]
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GFX6-NEXT:    v_cvt_f32_u32_e32 v1, s7
-; GFX6-NEXT:    s_sub_u32 s8, 0, s6
-; GFX6-NEXT:    s_subb_u32 s9, 0, s7
+; GFX6-NEXT:    s_sub_u32 s2, 0, s6
+; GFX6-NEXT:    s_subb_u32 s3, 0, s7
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX6-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX6-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -9639,150 +9558,139 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX6-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX6-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GFX6-NEXT:    v_mul_hi_u32 v2, s8, v0
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
-; GFX6-NEXT:    v_readfirstlane_b32 s2, v0
-; GFX6-NEXT:    s_mul_i32 s1, s8, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s3, v2
-; GFX6-NEXT:    s_mul_i32 s0, s9, s2
-; GFX6-NEXT:    s_add_i32 s1, s3, s1
-; GFX6-NEXT:    s_add_i32 s3, s1, s0
-; GFX6-NEXT:    s_mul_i32 s15, s8, s2
-; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s3
-; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s15
-; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
-; GFX6-NEXT:    s_mul_i32 s4, s2, s3
-; GFX6-NEXT:    v_readfirstlane_b32 s5, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s2, v0
+; GFX6-NEXT:    v_readfirstlane_b32 s8, v1
+; GFX6-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX6-NEXT:    s_mul_i32 s9, s2, s8
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v2
+; GFX6-NEXT:    s_mul_i32 s1, s3, s0
+; GFX6-NEXT:    s_add_i32 s9, s12, s9
+; GFX6-NEXT:    s_add_i32 s9, s9, s1
+; GFX6-NEXT:    s_mul_i32 s1, s2, s0
+; GFX6-NEXT:    v_mul_hi_u32 v2, v0, s9
+; GFX6-NEXT:    v_mul_hi_u32 v0, v0, s1
+; GFX6-NEXT:    s_mul_i32 s12, s0, s9
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v2
 ; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, v1, s15
-; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s3
-; GFX6-NEXT:    s_add_u32 s4, s16, s4
-; GFX6-NEXT:    s_addc_u32 s5, 0, s5
-; GFX6-NEXT:    s_mul_i32 s15, s14, s15
+; GFX6-NEXT:    v_mul_hi_u32 v0, v1, s1
+; GFX6-NEXT:    v_mul_hi_u32 v1, v1, s9
+; GFX6-NEXT:    s_add_u32 s12, s16, s12
+; GFX6-NEXT:    s_addc_u32 s13, 0, s13
+; GFX6-NEXT:    s_mul_i32 s1, s8, s1
 ; GFX6-NEXT:    v_readfirstlane_b32 s16, v0
-; GFX6-NEXT:    s_add_u32 s4, s4, s15
-; GFX6-NEXT:    s_addc_u32 s4, s5, s16
-; GFX6-NEXT:    v_readfirstlane_b32 s5, v1
-; GFX6-NEXT:    s_addc_u32 s5, s5, 0
-; GFX6-NEXT:    s_mul_i32 s3, s14, s3
-; GFX6-NEXT:    s_add_u32 s3, s4, s3
-; GFX6-NEXT:    s_addc_u32 s4, 0, s5
-; GFX6-NEXT:    s_add_i32 s5, s2, s3
-; GFX6-NEXT:    v_mov_b32_e32 v0, s5
-; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s8, v0
-; GFX6-NEXT:    s_or_b32 s2, s2, s3
-; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX6-NEXT:    s_addc_u32 s4, s14, s4
-; GFX6-NEXT:    s_mul_i32 s2, s8, s4
-; GFX6-NEXT:    v_readfirstlane_b32 s3, v0
-; GFX6-NEXT:    s_add_i32 s2, s3, s2
-; GFX6-NEXT:    s_mul_i32 s9, s9, s5
-; GFX6-NEXT:    s_mul_i32 s3, s8, s5
-; GFX6-NEXT:    s_add_i32 s2, s2, s9
-; GFX6-NEXT:    v_mov_b32_e32 v2, s3
-; GFX6-NEXT:    v_mov_b32_e32 v0, s2
+; GFX6-NEXT:    s_add_u32 s1, s12, s1
+; GFX6-NEXT:    s_addc_u32 s1, s13, s16
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX6-NEXT:    s_addc_u32 s12, s12, 0
+; GFX6-NEXT:    s_mul_i32 s9, s8, s9
+; GFX6-NEXT:    s_add_u32 s1, s1, s9
+; GFX6-NEXT:    s_addc_u32 s9, 0, s12
+; GFX6-NEXT:    s_add_i32 s12, s0, s1
+; GFX6-NEXT:    v_mov_b32_e32 v0, s12
+; GFX6-NEXT:    v_mul_hi_u32 v0, s2, v0
+; GFX6-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GFX6-NEXT:    s_addc_u32 s4, s8, s9
+; GFX6-NEXT:    s_mul_i32 s5, s2, s4
+; GFX6-NEXT:    v_readfirstlane_b32 s8, v0
+; GFX6-NEXT:    s_add_i32 s5, s8, s5
+; GFX6-NEXT:    s_mul_i32 s3, s3, s12
+; GFX6-NEXT:    s_mul_i32 s2, s2, s12
+; GFX6-NEXT:    s_add_i32 s3, s5, s3
+; GFX6-NEXT:    v_mov_b32_e32 v2, s2
+; GFX6-NEXT:    v_mov_b32_e32 v0, s3
 ; GFX6-NEXT:    v_mul_hi_u32 v3, s4, v2
-; GFX6-NEXT:    v_mul_hi_u32 v2, s5, v2
+; GFX6-NEXT:    v_mul_hi_u32 v2, s12, v2
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s4, v0
-; GFX6-NEXT:    v_mul_hi_u32 v0, s5, v0
-; GFX6-NEXT:    s_mul_i32 s9, s5, s2
-; GFX6-NEXT:    v_readfirstlane_b32 s15, v2
-; GFX6-NEXT:    s_add_u32 s9, s15, s9
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
-; GFX6-NEXT:    s_mul_i32 s3, s4, s3
-; GFX6-NEXT:    s_addc_u32 s14, 0, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s8, v3
-; GFX6-NEXT:    s_add_u32 s3, s9, s3
-; GFX6-NEXT:    s_addc_u32 s3, s14, s8
-; GFX6-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX6-NEXT:    s_addc_u32 s8, s8, 0
+; GFX6-NEXT:    v_mul_hi_u32 v0, s12, v0
+; GFX6-NEXT:    s_mul_i32 s8, s12, s3
+; GFX6-NEXT:    v_readfirstlane_b32 s13, v2
+; GFX6-NEXT:    s_add_u32 s8, s13, s8
+; GFX6-NEXT:    v_readfirstlane_b32 s9, v0
 ; GFX6-NEXT:    s_mul_i32 s2, s4, s2
-; GFX6-NEXT:    s_add_u32 s2, s3, s2
-; GFX6-NEXT:    s_addc_u32 s8, 0, s8
-; GFX6-NEXT:    s_add_i32 s14, s5, s2
-; GFX6-NEXT:    s_cselect_b64 s[2:3], 1, 0
-; GFX6-NEXT:    s_or_b32 s2, s2, s3
-; GFX6-NEXT:    s_cmp_lg_u32 s2, 0
-; GFX6-NEXT:    s_addc_u32 s15, s4, s8
+; GFX6-NEXT:    s_addc_u32 s9, 0, s9
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v3
+; GFX6-NEXT:    s_add_u32 s2, s8, s2
+; GFX6-NEXT:    s_addc_u32 s2, s9, s5
+; GFX6-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX6-NEXT:    s_addc_u32 s5, s5, 0
+; GFX6-NEXT:    s_mul_i32 s3, s4, s3
+; GFX6-NEXT:    s_add_u32 s2, s2, s3
+; GFX6-NEXT:    s_addc_u32 s3, 0, s5
+; GFX6-NEXT:    s_add_i32 s12, s12, s2
+; GFX6-NEXT:    s_addc_u32 s13, s4, s3
 ; GFX6-NEXT:    s_ashr_i32 s4, s11, 31
 ; GFX6-NEXT:    s_add_u32 s2, s10, s4
 ; GFX6-NEXT:    s_mov_b32 s5, s4
 ; GFX6-NEXT:    s_addc_u32 s3, s11, s4
 ; GFX6-NEXT:    s_xor_b64 s[8:9], s[2:3], s[4:5]
-; GFX6-NEXT:    v_mov_b32_e32 v0, s15
+; GFX6-NEXT:    v_mov_b32_e32 v0, s13
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s8, v0
-; GFX6-NEXT:    v_mov_b32_e32 v2, s14
+; GFX6-NEXT:    v_mov_b32_e32 v2, s12
 ; GFX6-NEXT:    v_mul_hi_u32 v3, s8, v2
-; GFX6-NEXT:    s_mul_i32 s2, s8, s15
+; GFX6-NEXT:    s_mul_i32 s2, s8, s13
 ; GFX6-NEXT:    v_readfirstlane_b32 s10, v1
 ; GFX6-NEXT:    v_mul_hi_u32 v1, s9, v2
 ; GFX6-NEXT:    v_readfirstlane_b32 s11, v3
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s9, v0
 ; GFX6-NEXT:    s_add_u32 s2, s11, s2
 ; GFX6-NEXT:    s_addc_u32 s10, 0, s10
-; GFX6-NEXT:    s_mul_i32 s11, s9, s14
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v1
+; GFX6-NEXT:    s_mul_i32 s11, s9, s12
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v1
 ; GFX6-NEXT:    s_add_u32 s2, s2, s11
-; GFX6-NEXT:    s_addc_u32 s2, s10, s14
+; GFX6-NEXT:    s_addc_u32 s2, s10, s12
 ; GFX6-NEXT:    v_readfirstlane_b32 s10, v0
 ; GFX6-NEXT:    s_addc_u32 s10, s10, 0
-; GFX6-NEXT:    s_mul_i32 s11, s9, s15
+; GFX6-NEXT:    s_mul_i32 s11, s9, s13
 ; GFX6-NEXT:    s_add_u32 s11, s2, s11
 ; GFX6-NEXT:    v_mov_b32_e32 v0, s11
 ; GFX6-NEXT:    v_mul_hi_u32 v0, s6, v0
 ; GFX6-NEXT:    s_addc_u32 s10, 0, s10
 ; GFX6-NEXT:    s_mul_i32 s10, s6, s10
 ; GFX6-NEXT:    s_mov_b32 s3, 0xf000
-; GFX6-NEXT:    v_readfirstlane_b32 s14, v0
-; GFX6-NEXT:    s_add_i32 s10, s14, s10
-; GFX6-NEXT:    s_mul_i32 s14, s7, s11
-; GFX6-NEXT:    s_add_i32 s14, s10, s14
-; GFX6-NEXT:    s_sub_i32 s15, s9, s14
+; GFX6-NEXT:    v_readfirstlane_b32 s12, v0
+; GFX6-NEXT:    s_add_i32 s10, s12, s10
+; GFX6-NEXT:    s_mul_i32 s12, s7, s11
+; GFX6-NEXT:    s_add_i32 s16, s10, s12
+; GFX6-NEXT:    s_sub_i32 s12, s9, s16
 ; GFX6-NEXT:    s_mul_i32 s10, s6, s11
 ; GFX6-NEXT:    s_sub_i32 s8, s8, s10
 ; GFX6-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX6-NEXT:    s_or_b32 s16, s10, s11
-; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
-; GFX6-NEXT:    s_subb_u32 s15, s15, s7
-; GFX6-NEXT:    s_sub_i32 s17, s8, s6
-; GFX6-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX6-NEXT:    s_or_b32 s10, s10, s11
-; GFX6-NEXT:    s_cmp_lg_u32 s10, 0
-; GFX6-NEXT:    s_subb_u32 s18, s15, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s18, s7
-; GFX6-NEXT:    s_cselect_b32 s11, -1, 0
-; GFX6-NEXT:    s_cmp_ge_u32 s17, s6
-; GFX6-NEXT:    s_cselect_b32 s19, -1, 0
-; GFX6-NEXT:    s_cmp_eq_u32 s18, s7
-; GFX6-NEXT:    s_cselect_b32 s19, s19, s11
-; GFX6-NEXT:    s_cmp_lg_u32 s10, 0
-; GFX6-NEXT:    s_subb_u32 s15, s15, s7
-; GFX6-NEXT:    s_sub_i32 s20, s17, s6
-; GFX6-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GFX6-NEXT:    s_subb_u32 s17, s12, s7
+; GFX6-NEXT:    s_sub_i32 s18, s8, s6
+; GFX6-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GFX6-NEXT:    s_subb_u32 s19, s17, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s19, s7
+; GFX6-NEXT:    s_cselect_b32 s20, -1, 0
+; GFX6-NEXT:    s_cmp_ge_u32 s18, s6
+; GFX6-NEXT:    s_cselect_b32 s21, -1, 0
+; GFX6-NEXT:    s_cmp_eq_u32 s19, s7
+; GFX6-NEXT:    s_cselect_b32 s20, s21, s20
+; GFX6-NEXT:    s_or_b32 s12, s12, s13
+; GFX6-NEXT:    s_cmp_lg_u32 s12, 0
+; GFX6-NEXT:    s_subb_u32 s12, s17, s7
+; GFX6-NEXT:    s_sub_i32 s13, s18, s6
+; GFX6-NEXT:    s_subb_u32 s12, s12, 0
+; GFX6-NEXT:    s_cmp_lg_u32 s20, 0
+; GFX6-NEXT:    s_cselect_b32 s13, s13, s18
+; GFX6-NEXT:    s_cselect_b32 s12, s12, s19
 ; GFX6-NEXT:    s_or_b32 s10, s10, s11
 ; GFX6-NEXT:    s_cmp_lg_u32 s10, 0
-; GFX6-NEXT:    s_subb_u32 s10, s15, 0
-; GFX6-NEXT:    s_cmp_lg_u32 s19, 0
-; GFX6-NEXT:    s_cselect_b32 s11, s20, s17
-; GFX6-NEXT:    s_cselect_b32 s10, s10, s18
-; GFX6-NEXT:    s_cmp_lg_u32 s16, 0
-; GFX6-NEXT:    s_subb_u32 s9, s9, s14
+; GFX6-NEXT:    s_subb_u32 s9, s9, s16
 ; GFX6-NEXT:    s_cmp_ge_u32 s9, s7
-; GFX6-NEXT:    s_cselect_b32 s14, -1, 0
+; GFX6-NEXT:    s_cselect_b32 s10, -1, 0
 ; GFX6-NEXT:    s_cmp_ge_u32 s8, s6
 ; GFX6-NEXT:    s_cselect_b32 s6, -1, 0
 ; GFX6-NEXT:    s_cmp_eq_u32 s9, s7
-; GFX6-NEXT:    s_cselect_b32 s6, s6, s14
+; GFX6-NEXT:    s_cselect_b32 s6, s6, s10
 ; GFX6-NEXT:    s_cmp_lg_u32 s6, 0
-; GFX6-NEXT:    s_cselect_b32 s7, s10, s9
-; GFX6-NEXT:    s_cselect_b32 s6, s11, s8
+; GFX6-NEXT:    s_cselect_b32 s7, s12, s9
+; GFX6-NEXT:    s_cselect_b32 s6, s13, s8
 ; GFX6-NEXT:    s_xor_b64 s[6:7], s[6:7], s[4:5]
 ; GFX6-NEXT:    s_sub_u32 s5, s6, s4
 ; GFX6-NEXT:    s_subb_u32 s4, s7, s4
 ; GFX6-NEXT:    s_mov_b32 s2, -1
-; GFX6-NEXT:    v_mov_b32_e32 v0, s12
-; GFX6-NEXT:    v_mov_b32_e32 v1, s13
+; GFX6-NEXT:    v_mov_b32_e32 v0, s14
+; GFX6-NEXT:    v_mov_b32_e32 v1, s15
 ; GFX6-NEXT:    v_mov_b32_e32 v2, s5
 ; GFX6-NEXT:    v_mov_b32_e32 v3, s4
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
@@ -9802,8 +9710,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_xor_b64 s[2:3], s[2:3], s[6:7]
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GFX9-NEXT:    s_sub_u32 s12, 0, s2
-; GFX9-NEXT:    s_subb_u32 s13, 0, s3
+; GFX9-NEXT:    s_sub_u32 s6, 0, s2
+; GFX9-NEXT:    s_subb_u32 s7, 0, s3
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -9812,72 +9720,68 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s14, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s6, v0
-; GFX9-NEXT:    s_mul_i32 s7, s12, s14
-; GFX9-NEXT:    s_mul_hi_u32 s16, s12, s6
-; GFX9-NEXT:    s_mul_i32 s15, s13, s6
-; GFX9-NEXT:    s_add_i32 s7, s16, s7
-; GFX9-NEXT:    s_mul_i32 s17, s12, s6
-; GFX9-NEXT:    s_add_i32 s7, s7, s15
-; GFX9-NEXT:    s_mul_hi_u32 s16, s6, s17
-; GFX9-NEXT:    s_mul_i32 s18, s6, s7
-; GFX9-NEXT:    s_mul_hi_u32 s15, s6, s7
+; GFX9-NEXT:    v_readfirstlane_b32 s12, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s13, v0
+; GFX9-NEXT:    s_mul_i32 s14, s6, s12
+; GFX9-NEXT:    s_mul_hi_u32 s16, s6, s13
+; GFX9-NEXT:    s_mul_i32 s15, s7, s13
+; GFX9-NEXT:    s_add_i32 s14, s16, s14
+; GFX9-NEXT:    s_mul_i32 s17, s6, s13
+; GFX9-NEXT:    s_add_i32 s14, s14, s15
+; GFX9-NEXT:    s_mul_hi_u32 s16, s13, s17
+; GFX9-NEXT:    s_mul_i32 s18, s13, s14
+; GFX9-NEXT:    s_mul_hi_u32 s15, s13, s14
 ; GFX9-NEXT:    s_add_u32 s16, s16, s18
 ; GFX9-NEXT:    s_addc_u32 s15, 0, s15
-; GFX9-NEXT:    s_mul_hi_u32 s18, s14, s17
-; GFX9-NEXT:    s_mul_i32 s17, s14, s17
+; GFX9-NEXT:    s_mul_hi_u32 s18, s12, s17
+; GFX9-NEXT:    s_mul_i32 s17, s12, s17
 ; GFX9-NEXT:    s_add_u32 s16, s16, s17
-; GFX9-NEXT:    s_mul_hi_u32 s19, s14, s7
+; GFX9-NEXT:    s_mul_hi_u32 s19, s12, s14
 ; GFX9-NEXT:    s_addc_u32 s15, s15, s18
 ; GFX9-NEXT:    s_addc_u32 s16, s19, 0
-; GFX9-NEXT:    s_mul_i32 s7, s14, s7
-; GFX9-NEXT:    s_add_u32 s7, s15, s7
+; GFX9-NEXT:    s_mul_i32 s14, s12, s14
+; GFX9-NEXT:    s_add_u32 s14, s15, s14
 ; GFX9-NEXT:    s_addc_u32 s15, 0, s16
-; GFX9-NEXT:    s_add_i32 s16, s6, s7
-; GFX9-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX9-NEXT:    s_addc_u32 s14, s14, s15
-; GFX9-NEXT:    s_mul_i32 s6, s12, s14
-; GFX9-NEXT:    s_mul_hi_u32 s7, s12, s16
-; GFX9-NEXT:    s_add_i32 s6, s7, s6
-; GFX9-NEXT:    s_mul_i32 s13, s13, s16
-; GFX9-NEXT:    s_add_i32 s6, s6, s13
-; GFX9-NEXT:    s_mul_i32 s12, s12, s16
-; GFX9-NEXT:    s_mul_hi_u32 s13, s14, s12
-; GFX9-NEXT:    s_mul_i32 s15, s14, s12
-; GFX9-NEXT:    s_mul_i32 s18, s16, s6
-; GFX9-NEXT:    s_mul_hi_u32 s12, s16, s12
-; GFX9-NEXT:    s_mul_hi_u32 s17, s16, s6
-; GFX9-NEXT:    s_add_u32 s12, s12, s18
+; GFX9-NEXT:    s_add_i32 s13, s13, s14
+; GFX9-NEXT:    s_addc_u32 s12, s12, s15
+; GFX9-NEXT:    s_mul_i32 s14, s6, s12
+; GFX9-NEXT:    s_mul_hi_u32 s15, s6, s13
+; GFX9-NEXT:    s_add_i32 s14, s15, s14
+; GFX9-NEXT:    s_mul_i32 s7, s7, s13
+; GFX9-NEXT:    s_add_i32 s14, s14, s7
+; GFX9-NEXT:    s_mul_i32 s6, s6, s13
+; GFX9-NEXT:    s_mul_hi_u32 s15, s12, s6
+; GFX9-NEXT:    s_mul_i32 s16, s12, s6
+; GFX9-NEXT:    s_mul_i32 s18, s13, s14
+; GFX9-NEXT:    s_mul_hi_u32 s6, s13, s6
+; GFX9-NEXT:    s_mul_hi_u32 s17, s13, s14
+; GFX9-NEXT:    s_add_u32 s6, s6, s18
 ; GFX9-NEXT:    s_addc_u32 s17, 0, s17
-; GFX9-NEXT:    s_add_u32 s12, s12, s15
-; GFX9-NEXT:    s_mul_hi_u32 s7, s14, s6
-; GFX9-NEXT:    s_addc_u32 s12, s17, s13
+; GFX9-NEXT:    s_add_u32 s6, s6, s16
+; GFX9-NEXT:    s_mul_hi_u32 s7, s12, s14
+; GFX9-NEXT:    s_addc_u32 s6, s17, s15
 ; GFX9-NEXT:    s_addc_u32 s7, s7, 0
-; GFX9-NEXT:    s_mul_i32 s6, s14, s6
-; GFX9-NEXT:    s_add_u32 s6, s12, s6
-; GFX9-NEXT:    s_addc_u32 s12, 0, s7
-; GFX9-NEXT:    s_add_i32 s16, s16, s6
-; GFX9-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX9-NEXT:    s_addc_u32 s12, s14, s12
+; GFX9-NEXT:    s_mul_i32 s14, s12, s14
+; GFX9-NEXT:    s_add_u32 s6, s6, s14
+; GFX9-NEXT:    s_addc_u32 s7, 0, s7
+; GFX9-NEXT:    s_add_i32 s13, s13, s6
+; GFX9-NEXT:    s_addc_u32 s12, s12, s7
 ; GFX9-NEXT:    s_ashr_i32 s6, s9, 31
 ; GFX9-NEXT:    s_add_u32 s8, s8, s6
 ; GFX9-NEXT:    s_mov_b32 s7, s6
 ; GFX9-NEXT:    s_addc_u32 s9, s9, s6
 ; GFX9-NEXT:    s_xor_b64 s[8:9], s[8:9], s[6:7]
-; GFX9-NEXT:    s_mul_i32 s14, s8, s12
-; GFX9-NEXT:    s_mul_hi_u32 s15, s8, s16
-; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s12
-; GFX9-NEXT:    s_add_u32 s14, s15, s14
-; GFX9-NEXT:    s_addc_u32 s13, 0, s13
-; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s16
-; GFX9-NEXT:    s_mul_i32 s16, s9, s16
-; GFX9-NEXT:    s_add_u32 s14, s14, s16
-; GFX9-NEXT:    s_mul_hi_u32 s15, s9, s12
-; GFX9-NEXT:    s_addc_u32 s13, s13, s17
-; GFX9-NEXT:    s_addc_u32 s14, s15, 0
+; GFX9-NEXT:    s_mul_i32 s15, s8, s12
+; GFX9-NEXT:    s_mul_hi_u32 s16, s8, s13
+; GFX9-NEXT:    s_mul_hi_u32 s14, s8, s12
+; GFX9-NEXT:    s_add_u32 s15, s16, s15
+; GFX9-NEXT:    s_addc_u32 s14, 0, s14
+; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s13
+; GFX9-NEXT:    s_mul_i32 s13, s9, s13
+; GFX9-NEXT:    s_add_u32 s13, s15, s13
+; GFX9-NEXT:    s_mul_hi_u32 s16, s9, s12
+; GFX9-NEXT:    s_addc_u32 s13, s14, s17
+; GFX9-NEXT:    s_addc_u32 s14, s16, 0
 ; GFX9-NEXT:    s_mul_i32 s12, s9, s12
 ; GFX9-NEXT:    s_add_u32 s12, s13, s12
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s14
@@ -9890,11 +9794,9 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_mul_i32 s12, s2, s12
 ; GFX9-NEXT:    s_sub_i32 s8, s8, s12
 ; GFX9-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
 ; GFX9-NEXT:    s_subb_u32 s17, s14, s3
 ; GFX9-NEXT:    s_sub_i32 s18, s8, s2
 ; GFX9-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; GFX9-NEXT:    s_subb_u32 s19, s17, 0
 ; GFX9-NEXT:    s_cmp_ge_u32 s19, s3
 ; GFX9-NEXT:    s_cselect_b32 s20, -1, 0
@@ -9903,13 +9805,11 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_cmp_eq_u32 s19, s3
 ; GFX9-NEXT:    s_cselect_b32 s20, s21, s20
 ; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT:    s_subb_u32 s17, s17, s3
-; GFX9-NEXT:    s_sub_i32 s21, s18, s2
-; GFX9-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[14:15], 0
-; GFX9-NEXT:    s_subb_u32 s14, s17, 0
+; GFX9-NEXT:    s_subb_u32 s14, s17, s3
+; GFX9-NEXT:    s_sub_i32 s15, s18, s2
+; GFX9-NEXT:    s_subb_u32 s14, s14, 0
 ; GFX9-NEXT:    s_cmp_lg_u32 s20, 0
-; GFX9-NEXT:    s_cselect_b32 s15, s21, s18
+; GFX9-NEXT:    s_cselect_b32 s15, s15, s18
 ; GFX9-NEXT:    s_cselect_b32 s14, s14, s19
 ; GFX9-NEXT:    s_cmp_lg_u64 s[12:13], 0
 ; GFX9-NEXT:    s_subb_u32 s9, s9, s16
@@ -9933,8 +9833,8 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s3
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0x24
-; GFX9-NEXT:    s_sub_u32 s6, 0, s2
-; GFX9-NEXT:    s_subb_u32 s7, 0, s3
+; GFX9-NEXT:    s_sub_u32 s4, 0, s2
+; GFX9-NEXT:    s_subb_u32 s5, 0, s3
 ; GFX9-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GFX9-NEXT:    v_rcp_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
@@ -9944,74 +9844,70 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v2
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v2, v2
-; GFX9-NEXT:    v_readfirstlane_b32 s4, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s6, v1
 ; GFX9-NEXT:    v_readfirstlane_b32 s9, v2
-; GFX9-NEXT:    s_mul_hi_u32 s8, s6, s4
-; GFX9-NEXT:    s_mul_i32 s14, s6, s9
-; GFX9-NEXT:    s_mul_i32 s5, s7, s4
+; GFX9-NEXT:    s_mul_hi_u32 s8, s4, s6
+; GFX9-NEXT:    s_mul_i32 s14, s4, s9
+; GFX9-NEXT:    s_mul_i32 s7, s5, s6
 ; GFX9-NEXT:    s_add_i32 s8, s8, s14
-; GFX9-NEXT:    s_add_i32 s8, s8, s5
-; GFX9-NEXT:    s_mul_i32 s15, s6, s4
-; GFX9-NEXT:    s_mul_i32 s14, s4, s8
-; GFX9-NEXT:    s_mul_hi_u32 s16, s4, s15
-; GFX9-NEXT:    s_mul_hi_u32 s5, s4, s8
+; GFX9-NEXT:    s_add_i32 s8, s8, s7
+; GFX9-NEXT:    s_mul_i32 s15, s4, s6
+; GFX9-NEXT:    s_mul_i32 s14, s6, s8
+; GFX9-NEXT:    s_mul_hi_u32 s16, s6, s15
+; GFX9-NEXT:    s_mul_hi_u32 s7, s6, s8
 ; GFX9-NEXT:    s_add_u32 s14, s16, s14
-; GFX9-NEXT:    s_addc_u32 s5, 0, s5
+; GFX9-NEXT:    s_addc_u32 s7, 0, s7
 ; GFX9-NEXT:    s_mul_hi_u32 s17, s9, s15
 ; GFX9-NEXT:    s_mul_i32 s15, s9, s15
 ; GFX9-NEXT:    s_add_u32 s14, s14, s15
 ; GFX9-NEXT:    s_mul_hi_u32 s16, s9, s8
-; GFX9-NEXT:    s_addc_u32 s5, s5, s17
+; GFX9-NEXT:    s_addc_u32 s7, s7, s17
 ; GFX9-NEXT:    s_addc_u32 s14, s16, 0
 ; GFX9-NEXT:    s_mul_i32 s8, s9, s8
-; GFX9-NEXT:    s_add_u32 s5, s5, s8
+; GFX9-NEXT:    s_add_u32 s7, s7, s8
 ; GFX9-NEXT:    s_addc_u32 s8, 0, s14
-; GFX9-NEXT:    s_add_i32 s14, s4, s5
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s8, s9, s8
-; GFX9-NEXT:    s_mul_i32 s4, s6, s8
-; GFX9-NEXT:    s_mul_hi_u32 s5, s6, s14
-; GFX9-NEXT:    s_add_i32 s4, s5, s4
-; GFX9-NEXT:    s_mul_i32 s7, s7, s14
-; GFX9-NEXT:    s_add_i32 s4, s4, s7
-; GFX9-NEXT:    s_mul_i32 s6, s6, s14
-; GFX9-NEXT:    s_mul_hi_u32 s7, s8, s6
-; GFX9-NEXT:    s_mul_i32 s9, s8, s6
-; GFX9-NEXT:    s_mul_i32 s16, s14, s4
-; GFX9-NEXT:    s_mul_hi_u32 s6, s14, s6
-; GFX9-NEXT:    s_mul_hi_u32 s15, s14, s4
-; GFX9-NEXT:    s_add_u32 s6, s6, s16
+; GFX9-NEXT:    s_add_i32 s6, s6, s7
+; GFX9-NEXT:    s_addc_u32 s7, s9, s8
+; GFX9-NEXT:    s_mul_i32 s8, s4, s7
+; GFX9-NEXT:    s_mul_hi_u32 s9, s4, s6
+; GFX9-NEXT:    s_add_i32 s8, s9, s8
+; GFX9-NEXT:    s_mul_i32 s5, s5, s6
+; GFX9-NEXT:    s_add_i32 s8, s8, s5
+; GFX9-NEXT:    s_mul_i32 s4, s4, s6
+; GFX9-NEXT:    s_mul_hi_u32 s9, s7, s4
+; GFX9-NEXT:    s_mul_i32 s14, s7, s4
+; GFX9-NEXT:    s_mul_i32 s16, s6, s8
+; GFX9-NEXT:    s_mul_hi_u32 s4, s6, s4
+; GFX9-NEXT:    s_mul_hi_u32 s15, s6, s8
+; GFX9-NEXT:    s_add_u32 s4, s4, s16
 ; GFX9-NEXT:    s_addc_u32 s15, 0, s15
-; GFX9-NEXT:    s_add_u32 s6, s6, s9
-; GFX9-NEXT:    s_mul_hi_u32 s5, s8, s4
-; GFX9-NEXT:    s_addc_u32 s6, s15, s7
+; GFX9-NEXT:    s_add_u32 s4, s4, s14
+; GFX9-NEXT:    s_mul_hi_u32 s5, s7, s8
+; GFX9-NEXT:    s_addc_u32 s4, s15, s9
 ; GFX9-NEXT:    s_addc_u32 s5, s5, 0
-; GFX9-NEXT:    s_mul_i32 s4, s8, s4
-; GFX9-NEXT:    s_add_u32 s4, s6, s4
-; GFX9-NEXT:    s_addc_u32 s6, 0, s5
-; GFX9-NEXT:    s_add_i32 s14, s14, s4
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX9-NEXT:    s_addc_u32 s8, s8, s6
+; GFX9-NEXT:    s_mul_i32 s8, s7, s8
+; GFX9-NEXT:    s_add_u32 s4, s4, s8
+; GFX9-NEXT:    s_addc_u32 s5, 0, s5
+; GFX9-NEXT:    s_add_i32 s8, s6, s4
+; GFX9-NEXT:    s_addc_u32 s9, s7, s5
 ; GFX9-NEXT:    s_ashr_i32 s4, s11, 31
 ; GFX9-NEXT:    s_add_u32 s6, s10, s4
 ; GFX9-NEXT:    s_mov_b32 s5, s4
 ; GFX9-NEXT:    s_addc_u32 s7, s11, s4
 ; GFX9-NEXT:    s_xor_b64 s[6:7], s[6:7], s[4:5]
-; GFX9-NEXT:    s_mul_i32 s10, s6, s8
-; GFX9-NEXT:    s_mul_hi_u32 s11, s6, s14
-; GFX9-NEXT:    s_mul_hi_u32 s9, s6, s8
-; GFX9-NEXT:    s_add_u32 s10, s11, s10
-; GFX9-NEXT:    s_addc_u32 s9, 0, s9
-; GFX9-NEXT:    s_mul_hi_u32 s15, s7, s14
-; GFX9-NEXT:    s_mul_i32 s14, s7, s14
-; GFX9-NEXT:    s_add_u32 s10, s10, s14
-; GFX9-NEXT:    s_mul_hi_u32 s11, s7, s8
-; GFX9-NEXT:    s_addc_u32 s9, s9, s15
-; GFX9-NEXT:    s_addc_u32 s10, s11, 0
+; GFX9-NEXT:    s_mul_i32 s11, s6, s9
+; GFX9-NEXT:    s_mul_hi_u32 s14, s6, s8
+; GFX9-NEXT:    s_mul_hi_u32 s10, s6, s9
+; GFX9-NEXT:    s_add_u32 s11, s14, s11
+; GFX9-NEXT:    s_addc_u32 s10, 0, s10
+; GFX9-NEXT:    s_mul_hi_u32 s15, s7, s8
 ; GFX9-NEXT:    s_mul_i32 s8, s7, s8
-; GFX9-NEXT:    s_add_u32 s8, s9, s8
+; GFX9-NEXT:    s_add_u32 s8, s11, s8
+; GFX9-NEXT:    s_mul_hi_u32 s14, s7, s9
+; GFX9-NEXT:    s_addc_u32 s8, s10, s15
+; GFX9-NEXT:    s_addc_u32 s10, s14, 0
+; GFX9-NEXT:    s_mul_i32 s9, s7, s9
+; GFX9-NEXT:    s_add_u32 s8, s8, s9
 ; GFX9-NEXT:    s_addc_u32 s9, 0, s10
 ; GFX9-NEXT:    s_mul_i32 s9, s2, s9
 ; GFX9-NEXT:    s_mul_hi_u32 s10, s2, s8
@@ -10022,11 +9918,9 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_mul_i32 s8, s2, s8
 ; GFX9-NEXT:    s_sub_i32 s6, s6, s8
 ; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
 ; GFX9-NEXT:    s_subb_u32 s15, s10, s3
 ; GFX9-NEXT:    s_sub_i32 s16, s6, s2
 ; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
 ; GFX9-NEXT:    s_subb_u32 s17, s15, 0
 ; GFX9-NEXT:    s_cmp_ge_u32 s17, s3
 ; GFX9-NEXT:    s_cselect_b32 s18, -1, 0
@@ -10035,13 +9929,11 @@ define amdgpu_kernel void @srem_v2i64_pow2_shl_denom(ptr addrspace(1) %out, <2 x
 ; GFX9-NEXT:    s_cmp_eq_u32 s17, s3
 ; GFX9-NEXT:    s_cselect_b32 s18, s19, s18
 ; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s15, s15, s3
-; GFX9-NEXT:    s_sub_i32 s19, s16, s2
-; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s10, s15, 0
+; GFX9-NEXT:    s_subb_u32 s10, s15, s3
+; GFX9-NEXT:    s_sub_i32 s11, s16, s2
+; GFX9-NEXT:    s_subb_u32 s10, s10, 0
 ; GFX9-NEXT:    s_cmp_lg_u32 s18, 0
-; GFX9-NEXT:    s_cselect_b32 s11, s19, s16
+; GFX9-NEXT:    s_cselect_b32 s11, s11, s16
 ; GFX9-NEXT:    s_cselect_b32 s10, s10, s17
 ; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
 ; GFX9-NEXT:    s_subb_u32 s7, s7, s14
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 1211a9f4363fe..844f00f234333 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -691,7 +691,8 @@ define amdgpu_kernel void @uaddo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
 ; GCN-ISEL-LABEL: name:   suaddo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_ADD_U64_PSEUDO
+; GCN-ISEL: S_UADDO_PSEUDO
+; GCN-ISEL: S_ADD_CO_PSEUDO
 
 define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
 ; CISI-LABEL: suaddo64:
@@ -700,21 +701,20 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s11, 0xf000
 ; CISI-NEXT:    s_mov_b32 s10, -1
 ; CISI-NEXT:    s_waitcnt lgkmcnt(0)
-; CISI-NEXT:    s_add_u32 s6, s4, s6
-; CISI-NEXT:    v_mov_b32_e32 v0, s4
-; CISI-NEXT:    s_addc_u32 s7, s5, s7
-; CISI-NEXT:    v_mov_b32_e32 v1, s5
-; CISI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT:    v_mov_b32_e32 v2, s6
+; CISI-NEXT:    s_add_i32 s4, s4, s6
+; CISI-NEXT:    s_addc_u32 s5, s5, s7
 ; CISI-NEXT:    s_mov_b32 s8, s0
 ; CISI-NEXT:    s_mov_b32 s9, s1
+; CISI-NEXT:    v_mov_b32_e32 v0, s4
+; CISI-NEXT:    v_mov_b32_e32 v1, s5
+; CISI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s10
 ; CISI-NEXT:    s_mov_b32 s3, s11
-; CISI-NEXT:    v_mov_b32_e32 v3, s7
-; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT:    s_waitcnt expcnt(0)
+; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; CISI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; CISI-NEXT:    s_endpgm
 ;
@@ -723,36 +723,32 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_add_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    s_add_i32 s0, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    s_addc_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: suaddo64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_add_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
+; GFX9-NEXT:    s_add_i32 s0, s12, s14
 ; GFX9-NEXT:    s_addc_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX1010-LABEL: suaddo64:
@@ -760,11 +756,11 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
 ; GFX1010-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1010-NEXT:    s_add_u32 s0, s12, s14
+; GFX1010-NEXT:    s_add_i32 s0, s12, s14
 ; GFX1010-NEXT:    s_addc_u32 s1, s13, s15
 ; GFX1010-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX1010-NEXT:    v_mov_b32_e32 v1, s1
-; GFX1010-NEXT:    v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -775,11 +771,11 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT:    s_add_u32 s6, s4, s6
-; GFX1030W32-NEXT:    s_addc_u32 s7, s5, s7
-; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT:    s_add_i32 s4, s4, s6
+; GFX1030W32-NEXT:    s_addc_u32 s5, s5, s7
+; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -790,11 +786,11 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT:    s_add_u32 s6, s4, s6
-; GFX1030W64-NEXT:    s_addc_u32 s7, s5, s7
-; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT:    v_cmp_lt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT:    s_add_i32 s4, s4, s6
+; GFX1030W64-NEXT:    s_addc_u32 s5, s5, s7
+; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -804,12 +800,11 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_add_u32 s6, s4, s6
-; GFX11-NEXT:    s_addc_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_add_i32 s4, s4, s6
+; GFX11-NEXT:    s_addc_u32 s5, s5, s7
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -819,12 +814,12 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-LABEL: suaddo64:
 ; GFX1250:       ; %bb.0:
 ; GFX1250-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_add_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT:    v_mov_b64_e32 v[0:1], s[6:7]
+; GFX1250-NEXT:    s_add_co_i32 s4, s4, s6
+; GFX1250-NEXT:    s_add_co_ci_u32 s5, s5, s7
+; GFX1250-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s4
+; GFX1250-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1250-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1665,7 +1660,8 @@ define amdgpu_kernel void @usubo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
 ; GCN-ISEL-LABEL: name:   susubo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_SUB_U64_PSEUDO
+; GCN-ISEL: S_USUBO_PSEUDO
+; GCN-ISEL: S_SUB_CO_PSEUDO
 
 define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
 ; CISI-LABEL: susubo64:
@@ -1674,21 +1670,20 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s11, 0xf000
 ; CISI-NEXT:    s_mov_b32 s10, -1
 ; CISI-NEXT:    s_waitcnt lgkmcnt(0)
-; CISI-NEXT:    s_sub_u32 s6, s4, s6
-; CISI-NEXT:    v_mov_b32_e32 v0, s4
-; CISI-NEXT:    s_subb_u32 s7, s5, s7
-; CISI-NEXT:    v_mov_b32_e32 v1, s5
-; CISI-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT:    v_mov_b32_e32 v2, s6
+; CISI-NEXT:    s_sub_i32 s4, s4, s6
+; CISI-NEXT:    s_subb_u32 s5, s5, s7
 ; CISI-NEXT:    s_mov_b32 s8, s0
 ; CISI-NEXT:    s_mov_b32 s9, s1
+; CISI-NEXT:    v_mov_b32_e32 v0, s4
+; CISI-NEXT:    v_mov_b32_e32 v1, s5
+; CISI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s10
 ; CISI-NEXT:    s_mov_b32 s3, s11
-; CISI-NEXT:    v_mov_b32_e32 v3, s7
-; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT:    s_waitcnt expcnt(0)
+; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; CISI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; CISI-NEXT:    s_endpgm
 ;
@@ -1697,36 +1692,32 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_sub_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    s_sub_i32 s0, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    s_subb_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: susubo64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_sub_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
+; GFX9-NEXT:    s_sub_i32 s0, s12, s14
 ; GFX9-NEXT:    s_subb_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX1010-LABEL: susubo64:
@@ -1734,11 +1725,11 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
 ; GFX1010-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1010-NEXT:    s_sub_u32 s0, s12, s14
+; GFX1010-NEXT:    s_sub_i32 s0, s12, s14
 ; GFX1010-NEXT:    s_subb_u32 s1, s13, s15
 ; GFX1010-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX1010-NEXT:    v_mov_b32_e32 v1, s1
-; GFX1010-NEXT:    v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -1749,11 +1740,11 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT:    s_sub_u32 s6, s4, s6
-; GFX1030W32-NEXT:    s_subb_u32 s7, s5, s7
-; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT:    s_sub_i32 s4, s4, s6
+; GFX1030W32-NEXT:    s_subb_u32 s5, s5, s7
+; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -1764,11 +1755,11 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT:    s_sub_u32 s6, s4, s6
-; GFX1030W64-NEXT:    s_subb_u32 s7, s5, s7
-; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT:    v_cmp_gt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT:    s_sub_i32 s4, s4, s6
+; GFX1030W64-NEXT:    s_subb_u32 s5, s5, s7
+; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -1778,12 +1769,11 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_sub_u32 s6, s4, s6
-; GFX11-NEXT:    s_subb_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_sub_i32 s4, s4, s6
+; GFX11-NEXT:    s_subb_u32 s5, s5, s7
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1793,12 +1783,12 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-LABEL: susubo64:
 ; GFX1250:       ; %bb.0:
 ; GFX1250-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_sub_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT:    v_mov_b64_e32 v[0:1], s[6:7]
+; GFX1250-NEXT:    s_sub_co_i32 s4, s4, s6
+; GFX1250-NEXT:    s_sub_co_ci_u32 s5, s5, s7
+; GFX1250-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s4
+; GFX1250-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1250-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -2192,49 +2182,46 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; VI-NEXT:    s_addc_u32 s6, s7, s9
 ; VI-NEXT:    s_addc_u32 s8, s8, 0
 ; VI-NEXT:    v_readfirstlane_b32 s7, v0
-; VI-NEXT:    s_add_u32 s12, s6, s7
-; VI-NEXT:    v_mov_b32_e32 v0, s12
+; VI-NEXT:    s_add_u32 s10, s6, s7
+; VI-NEXT:    v_mov_b32_e32 v0, s10
 ; VI-NEXT:    v_mad_u64_u32 v[0:1], s[6:7], s4, v0, 0
-; VI-NEXT:    s_addc_u32 s13, 0, s8
-; VI-NEXT:    s_mul_i32 s8, s4, s13
+; VI-NEXT:    s_addc_u32 s11, 0, s8
+; VI-NEXT:    s_mul_i32 s8, s4, s11
 ; VI-NEXT:    v_readfirstlane_b32 s9, v1
 ; VI-NEXT:    s_add_i32 s8, s9, s8
-; VI-NEXT:    s_mul_i32 s9, s5, s12
-; VI-NEXT:    s_add_i32 s14, s8, s9
-; VI-NEXT:    s_sub_i32 s10, s3, s14
+; VI-NEXT:    s_mul_i32 s9, s5, s10
+; VI-NEXT:    s_add_i32 s12, s8, s9
+; VI-NEXT:    s_sub_i32 s13, s3, s12
 ; VI-NEXT:    v_readfirstlane_b32 s8, v0
-; VI-NEXT:    s_sub_i32 s15, s2, s8
+; VI-NEXT:    s_sub_i32 s14, s2, s8
 ; VI-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; VI-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; VI-NEXT:    s_subb_u32 s16, s10, s5
-; VI-NEXT:    s_sub_i32 s17, s15, s4
-; VI-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; VI-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; VI-NEXT:    s_subb_u32 s10, s16, 0
-; VI-NEXT:    s_cmp_ge_u32 s10, s5
-; VI-NEXT:    s_cselect_b32 s11, -1, 0
-; VI-NEXT:    s_cmp_ge_u32 s17, s4
+; VI-NEXT:    s_subb_u32 s13, s13, s5
+; VI-NEXT:    s_sub_i32 s15, s14, s4
+; VI-NEXT:    s_subb_u32 s13, s13, 0
+; VI-NEXT:    s_cmp_ge_u32 s13, s5
 ; VI-NEXT:    s_cselect_b32 s16, -1, 0
-; VI-NEXT:    s_cmp_eq_u32 s10, s5
-; VI-NEXT:    s_cselect_b32 s10, s16, s11
-; VI-NEXT:    s_add_u32 s11, s12, 1
-; VI-NEXT:    s_addc_u32 s16, s13, 0
-; VI-NEXT:    s_add_u32 s17, s12, 2
-; VI-NEXT:    s_addc_u32 s18, s13, 0
-; VI-NEXT:    s_cmp_lg_u32 s10, 0
-; VI-NEXT:    s_cselect_b32 s10, s17, s11
-; VI-NEXT:    s_cselect_b32 s11, s18, s16
+; VI-NEXT:    s_cmp_ge_u32 s15, s4
+; VI-NEXT:    s_cselect_b32 s15, -1, 0
+; VI-NEXT:    s_cmp_eq_u32 s13, s5
+; VI-NEXT:    s_cselect_b32 s13, s15, s16
+; VI-NEXT:    s_add_u32 s15, s10, 1
+; VI-NEXT:    s_addc_u32 s16, s11, 0
+; VI-NEXT:    s_add_u32 s17, s10, 2
+; VI-NEXT:    s_addc_u32 s18, s11, 0
+; VI-NEXT:    s_cmp_lg_u32 s13, 0
+; VI-NEXT:    s_cselect_b32 s13, s17, s15
+; VI-NEXT:    s_cselect_b32 s15, s18, s16
 ; VI-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; VI-NEXT:    s_subb_u32 s3, s3, s14
+; VI-NEXT:    s_subb_u32 s3, s3, s12
 ; VI-NEXT:    s_cmp_ge_u32 s3, s5
 ; VI-NEXT:    s_cselect_b32 s8, -1, 0
-; VI-NEXT:    s_cmp_ge_u32 s15, s4
+; VI-NEXT:    s_cmp_ge_u32 s14, s4
 ; VI-NEXT:    s_cselect_b32 s9, -1, 0
 ; VI-NEXT:    s_cmp_eq_u32 s3, s5
 ; VI-NEXT:    s_cselect_b32 s3, s9, s8
 ; VI-NEXT:    s_cmp_lg_u32 s3, 0
-; VI-NEXT:    s_cselect_b32 s9, s11, s13
-; VI-NEXT:    s_cselect_b32 s8, s10, s12
+; VI-NEXT:    s_cselect_b32 s9, s15, s11
+; VI-NEXT:    s_cselect_b32 s8, s13, s10
 ; VI-NEXT:    s_cbranch_execnz .LBB16_4
 ; VI-NEXT:  .LBB16_2:
 ; VI-NEXT:    v_cvt_f32_u32_e32 v0, s4
@@ -2285,8 +2272,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX9-NEXT:  ; %bb.1:
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v1, s7
-; GFX9-NEXT:    s_sub_u32 s10, 0, s6
-; GFX9-NEXT:    s_subb_u32 s11, 0, s7
+; GFX9-NEXT:    s_sub_u32 s8, 0, s6
+; GFX9-NEXT:    s_subb_u32 s9, 0, s7
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX9-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2295,109 +2282,102 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX9-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX9-NEXT:    v_readfirstlane_b32 s12, v1
-; GFX9-NEXT:    v_readfirstlane_b32 s8, v0
-; GFX9-NEXT:    s_mul_i32 s9, s10, s12
-; GFX9-NEXT:    s_mul_hi_u32 s14, s10, s8
-; GFX9-NEXT:    s_mul_i32 s13, s11, s8
-; GFX9-NEXT:    s_add_i32 s9, s14, s9
-; GFX9-NEXT:    s_add_i32 s9, s9, s13
-; GFX9-NEXT:    s_mul_i32 s15, s10, s8
-; GFX9-NEXT:    s_mul_i32 s14, s8, s9
-; GFX9-NEXT:    s_mul_hi_u32 s16, s8, s15
-; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s9
+; GFX9-NEXT:    v_readfirstlane_b32 s10, v1
+; GFX9-NEXT:    v_readfirstlane_b32 s11, v0
+; GFX9-NEXT:    s_mul_i32 s12, s8, s10
+; GFX9-NEXT:    s_mul_hi_u32 s14, s8, s11
+; GFX9-NEXT:    s_mul_i32 s13, s9, s11
+; GFX9-NEXT:    s_add_i32 s12, s14, s12
+; GFX9-NEXT:    s_add_i32 s12, s12, s13
+; GFX9-NEXT:    s_mul_i32 s15, s8, s11
+; GFX9-NEXT:    s_mul_i32 s14, s11, s12
+; GFX9-NEXT:    s_mul_hi_u32 s16, s11, s15
+; GFX9-NEXT:    s_mul_hi_u32 s13, s11, s12
 ; GFX9-NEXT:    s_add_u32 s14, s16, s14
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s13
-; GFX9-NEXT:    s_mul_hi_u32 s17, s12, s15
-; GFX9-NEXT:    s_mul_i32 s15, s12, s15
+; GFX9-NEXT:    s_mul_hi_u32 s17, s10, s15
+; GFX9-NEXT:    s_mul_i32 s15, s10, s15
 ; GFX9-NEXT:    s_add_u32 s14, s14, s15
-; GFX9-NEXT:    s_mul_hi_u32 s16, s12, s9
+; GFX9-NEXT:    s_mul_hi_u32 s16, s10, s12
 ; GFX9-NEXT:    s_addc_u32 s13, s13, s17
 ; GFX9-NEXT:    s_addc_u32 s14, s16, 0
-; GFX9-NEXT:    s_mul_i32 s9, s12, s9
-; GFX9-NEXT:    s_add_u32 s9, s13, s9
+; GFX9-NEXT:    s_mul_i32 s12, s10, s12
+; GFX9-NEXT:    s_add_u32 s12, s13, s12
 ; GFX9-NEXT:    s_addc_u32 s13, 0, s14
-; GFX9-NEXT:    s_add_i32 s14, s8, s9
-; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT:    s_addc_u32 s12, s12, s13
-; GFX9-NEXT:    s_mul_i32 s8, s10, s12
-; GFX9-NEXT:    s_mul_hi_u32 s9, s10, s14
-; GFX9-NEXT:    s_add_i32 s8, s9, s8
-; GFX9-NEXT:    s_mul_i32 s11, s11, s14
-; GFX9-NEXT:    s_add_i32 s8, s8, s11
-; GFX9-NEXT:    s_mul_i32 s10, s10, s14
-; GFX9-NEXT:    s_mul_hi_u32 s11, s12, s10
-; GFX9-NEXT:    s_mul_i32 s13, s12, s10
-; GFX9-NEXT:    s_mul_i32 s16, s14, s8
-; GFX9-NEXT:    s_mul_hi_u32 s10, s14, s10
-; GFX9-NEXT:    s_mul_hi_u32 s15, s14, s8
-; GFX9-NEXT:    s_add_u32 s10, s10, s16
+; GFX9-NEXT:    s_add_i32 s11, s11, s12
+; GFX9-NEXT:    s_addc_u32 s10, s10, s13
+; GFX9-NEXT:    s_mul_i32 s12, s8, s10
+; GFX9-NEXT:    s_mul_hi_u32 s13, s8, s11
+; GFX9-NEXT:    s_add_i32 s12, s13, s12
+; GFX9-NEXT:    s_mul_i32 s9, s9, s11
+; GFX9-NEXT:    s_add_i32 s12, s12, s9
+; GFX9-NEXT:    s_mul_i32 s8, s8, s11
+; GFX9-NEXT:    s_mul_hi_u32 s13, s10, s8
+; GFX9-NEXT:    s_mul_i32 s14, s10, s8
+; GFX9-NEXT:    s_mul_i32 s16, s11, s12
+; GFX9-NEXT:    s_mul_hi_u32 s8, s11, s8
+; GFX9-NEXT:    s_mul_hi_u32 s15, s11, s12
+; GFX9-NEXT:    s_add_u32 s8, s8, s16
 ; GFX9-NEXT:    s_addc_u32 s15, 0, s15
-; GFX9-NEXT:    s_add_u32 s10, s10, s13
-; GFX9-NEXT:    s_mul_hi_u32 s9, s12, s8
-; GFX9-NEXT:    s_addc_u32 s10, s15, s11
+; GFX9-NEXT:    s_add_u32 s8, s8, s14
+; GFX9-NEXT:    s_mul_hi_u32 s9, s10, s12
+; GFX9-NEXT:    s_addc_u32 s8, s15, s13
 ; GFX9-NEXT:    s_addc_u32 s9, s9, 0
-; GFX9-NEXT:    s_mul_i32 s8, s12, s8
-; GFX9-NEXT:    s_add_u32 s8, s10, s8
-; GFX9-NEXT:    s_addc_u32 s10, 0, s9
-; GFX9-NEXT:    s_add_i32 s14, s14, s8
-; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT:    s_addc_u32 s8, s12, s10
+; GFX9-NEXT:    s_mul_i32 s12, s10, s12
+; GFX9-NEXT:    s_add_u32 s8, s8, s12
+; GFX9-NEXT:    s_addc_u32 s9, 0, s9
+; GFX9-NEXT:    s_add_i32 s11, s11, s8
+; GFX9-NEXT:    s_addc_u32 s8, s10, s9
 ; GFX9-NEXT:    s_mul_i32 s10, s2, s8
-; GFX9-NEXT:    s_mul_hi_u32 s11, s2, s14
+; GFX9-NEXT:    s_mul_hi_u32 s12, s2, s11
 ; GFX9-NEXT:    s_mul_hi_u32 s9, s2, s8
-; GFX9-NEXT:    s_add_u32 s10, s11, s10
+; GFX9-NEXT:    s_add_u32 s10, s12, s10
 ; GFX9-NEXT:    s_addc_u32 s9, 0, s9
-; GFX9-NEXT:    s_mul_i32 s13, s3, s14
-; GFX9-NEXT:    s_mul_hi_u32 s12, s3, s14
-; GFX9-NEXT:    s_add_u32 s10, s10, s13
-; GFX9-NEXT:    s_mul_hi_u32 s11, s3, s8
-; GFX9-NEXT:    s_addc_u32 s9, s9, s12
-; GFX9-NEXT:    s_addc_u32 s10, s11, 0
+; GFX9-NEXT:    s_mul_hi_u32 s13, s3, s11
+; GFX9-NEXT:    s_mul_i32 s11, s3, s11
+; GFX9-NEXT:    s_add_u32 s10, s10, s11
+; GFX9-NEXT:    s_mul_hi_u32 s12, s3, s8
+; GFX9-NEXT:    s_addc_u32 s9, s9, s13
+; GFX9-NEXT:    s_addc_u32 s10, s12, 0
 ; GFX9-NEXT:    s_mul_i32 s8, s3, s8
-; GFX9-NEXT:    s_add_u32 s12, s9, s8
-; GFX9-NEXT:    s_addc_u32 s13, 0, s10
-; GFX9-NEXT:    s_mul_i32 s8, s6, s13
-; GFX9-NEXT:    s_mul_hi_u32 s9, s6, s12
+; GFX9-NEXT:    s_add_u32 s11, s9, s8
+; GFX9-NEXT:    s_addc_u32 s10, 0, s10
+; GFX9-NEXT:    s_mul_i32 s8, s6, s10
+; GFX9-NEXT:    s_mul_hi_u32 s9, s6, s11
 ; GFX9-NEXT:    s_add_i32 s8, s9, s8
-; GFX9-NEXT:    s_mul_i32 s9, s7, s12
-; GFX9-NEXT:    s_add_i32 s14, s8, s9
-; GFX9-NEXT:    s_sub_i32 s10, s3, s14
-; GFX9-NEXT:    s_mul_i32 s8, s6, s12
-; GFX9-NEXT:    s_sub_i32 s15, s2, s8
+; GFX9-NEXT:    s_mul_i32 s9, s7, s11
+; GFX9-NEXT:    s_add_i32 s12, s8, s9
+; GFX9-NEXT:    s_sub_i32 s13, s3, s12
+; GFX9-NEXT:    s_mul_i32 s8, s6, s11
+; GFX9-NEXT:    s_sub_i32 s14, s2, s8
 ; GFX9-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT:    s_subb_u32 s16, s10, s7
-; GFX9-NEXT:    s_sub_i32 s17, s15, s6
-; GFX9-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GFX9-NEXT:    s_subb_u32 s10, s16, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s10, s7
-; GFX9-NEXT:    s_cselect_b32 s11, -1, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s17, s6
+; GFX9-NEXT:    s_subb_u32 s13, s13, s7
+; GFX9-NEXT:    s_sub_i32 s15, s14, s6
+; GFX9-NEXT:    s_subb_u32 s13, s13, 0
+; GFX9-NEXT:    s_cmp_ge_u32 s13, s7
 ; GFX9-NEXT:    s_cselect_b32 s16, -1, 0
-; GFX9-NEXT:    s_cmp_eq_u32 s10, s7
-; GFX9-NEXT:    s_cselect_b32 s10, s16, s11
-; GFX9-NEXT:    s_add_u32 s11, s12, 1
-; GFX9-NEXT:    s_addc_u32 s16, s13, 0
-; GFX9-NEXT:    s_add_u32 s17, s12, 2
-; GFX9-NEXT:    s_addc_u32 s18, s13, 0
-; GFX9-NEXT:    s_cmp_lg_u32 s10, 0
-; GFX9-NEXT:    s_cselect_b32 s10, s17, s11
-; GFX9-NEXT:    s_cselect_b32 s11, s18, s16
+; GFX9-NEXT:    s_cmp_ge_u32 s15, s6
+; GFX9-NEXT:    s_cselect_b32 s15, -1, 0
+; GFX9-NEXT:    s_cmp_eq_u32 s13, s7
+; GFX9-NEXT:    s_cselect_b32 s13, s15, s16
+; GFX9-NEXT:    s_add_u32 s15, s11, 1
+; GFX9-NEXT:    s_addc_u32 s16, s10, 0
+; GFX9-NEXT:    s_add_u32 s17, s11, 2
+; GFX9-NEXT:    s_addc_u32 s18, s10, 0
+; GFX9-NEXT:    s_cmp_lg_u32 s13, 0
+; GFX9-NEXT:    s_cselect_b32 s13, s17, s15
+; GFX9-NEXT:    s_cselect_b32 s15, s18, s16
 ; GFX9-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX9-NEXT:    s_subb_u32 s3, s3, s14
+; GFX9-NEXT:    s_subb_u32 s3, s3, s12
 ; GFX9-NEXT:    s_cmp_ge_u32 s3, s7
 ; GFX9-NEXT:    s_cselect_b32 s8, -1, 0
-; GFX9-NEXT:    s_cmp_ge_u32 s15, s6
+; GFX9-NEXT:    s_cmp_ge_u32 s14, s6
 ; GFX9-NEXT:    s_cselect_b32 s9, -1, 0
 ; GFX9-NEXT:    s_cmp_eq_u32 s3, s7
 ; GFX9-NEXT:    s_cselect_b32 s3, s9, s8
 ; GFX9-NEXT:    s_cmp_lg_u32 s3, 0
-; GFX9-NEXT:    s_cselect_b32 s9, s11, s13
-; GFX9-NEXT:    s_cselect_b32 s8, s10, s12
+; GFX9-NEXT:    s_cselect_b32 s9, s15, s10
+; GFX9-NEXT:    s_cselect_b32 s8, s13, s11
 ; GFX9-NEXT:    s_cbranch_execnz .LBB16_3
 ; GFX9-NEXT:  .LBB16_2:
 ; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
@@ -2477,44 +2457,40 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1010-NEXT:    s_add_u32 s11, s12, s11
 ; GFX1010-NEXT:    s_addc_u32 s12, 0, s13
 ; GFX1010-NEXT:    s_add_i32 s8, s8, s11
-; GFX1010-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX1010-NEXT:    s_mul_hi_u32 s13, s9, s8
-; GFX1010-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX1010-NEXT:    s_mul_i32 s11, s9, s8
 ; GFX1010-NEXT:    s_addc_u32 s5, s5, s12
-; GFX1010-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1010-NEXT:    s_mul_hi_u32 s11, s9, s8
+; GFX1010-NEXT:    s_mul_i32 s12, s9, s8
 ; GFX1010-NEXT:    s_mul_i32 s9, s9, s5
-; GFX1010-NEXT:    s_mul_hi_u32 s12, s8, s11
-; GFX1010-NEXT:    s_add_i32 s9, s13, s9
-; GFX1010-NEXT:    s_mul_hi_u32 s13, s5, s11
+; GFX1010-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1010-NEXT:    s_add_i32 s9, s11, s9
+; GFX1010-NEXT:    s_mul_i32 s11, s5, s12
 ; GFX1010-NEXT:    s_add_i32 s9, s9, s10
-; GFX1010-NEXT:    s_mul_i32 s10, s5, s11
+; GFX1010-NEXT:    s_mul_hi_u32 s10, s8, s12
 ; GFX1010-NEXT:    s_mul_i32 s15, s8, s9
 ; GFX1010-NEXT:    s_mul_hi_u32 s14, s8, s9
-; GFX1010-NEXT:    s_add_u32 s12, s12, s15
+; GFX1010-NEXT:    s_add_u32 s10, s10, s15
+; GFX1010-NEXT:    s_mul_hi_u32 s13, s5, s12
 ; GFX1010-NEXT:    s_addc_u32 s14, 0, s14
-; GFX1010-NEXT:    s_mul_hi_u32 s11, s5, s9
-; GFX1010-NEXT:    s_add_u32 s10, s12, s10
+; GFX1010-NEXT:    s_mul_hi_u32 s12, s5, s9
+; GFX1010-NEXT:    s_add_u32 s10, s10, s11
 ; GFX1010-NEXT:    s_mul_i32 s9, s5, s9
 ; GFX1010-NEXT:    s_addc_u32 s10, s14, s13
-; GFX1010-NEXT:    s_addc_u32 s11, s11, 0
+; GFX1010-NEXT:    s_addc_u32 s11, s12, 0
 ; GFX1010-NEXT:    s_add_u32 s9, s10, s9
 ; GFX1010-NEXT:    s_addc_u32 s10, 0, s11
 ; GFX1010-NEXT:    s_add_i32 s8, s8, s9
-; GFX1010-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX1010-NEXT:    s_mul_hi_u32 s11, s2, s8
-; GFX1010-NEXT:    s_cmp_lg_u32 s9, 0
-; GFX1010-NEXT:    s_mul_hi_u32 s9, s3, s8
 ; GFX1010-NEXT:    s_addc_u32 s5, s5, s10
-; GFX1010-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1010-NEXT:    s_mul_hi_u32 s9, s2, s8
 ; GFX1010-NEXT:    s_mul_i32 s12, s2, s5
-; GFX1010-NEXT:    s_mul_hi_u32 s10, s2, s5
-; GFX1010-NEXT:    s_add_u32 s11, s11, s12
-; GFX1010-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1010-NEXT:    s_mul_hi_u32 s11, s2, s5
+; GFX1010-NEXT:    s_mul_hi_u32 s10, s3, s8
+; GFX1010-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1010-NEXT:    s_add_u32 s9, s9, s12
+; GFX1010-NEXT:    s_addc_u32 s11, 0, s11
 ; GFX1010-NEXT:    s_mul_hi_u32 s13, s3, s5
-; GFX1010-NEXT:    s_add_u32 s8, s11, s8
+; GFX1010-NEXT:    s_add_u32 s8, s9, s8
 ; GFX1010-NEXT:    s_mul_i32 s5, s3, s5
-; GFX1010-NEXT:    s_addc_u32 s8, s10, s9
+; GFX1010-NEXT:    s_addc_u32 s8, s11, s10
 ; GFX1010-NEXT:    s_addc_u32 s9, s13, 0
 ; GFX1010-NEXT:    s_add_u32 s5, s8, s5
 ; GFX1010-NEXT:    s_addc_u32 s8, 0, s9
@@ -2527,11 +2503,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1010-NEXT:    s_sub_i32 s11, s3, s9
 ; GFX1010-NEXT:    s_sub_i32 s10, s2, s10
 ; GFX1010-NEXT:    s_cselect_b32 s12, 1, 0
-; GFX1010-NEXT:    s_cmp_lg_u32 s12, 0
 ; GFX1010-NEXT:    s_subb_u32 s11, s11, s7
 ; GFX1010-NEXT:    s_sub_i32 s13, s10, s6
-; GFX1010-NEXT:    s_cselect_b32 s14, 1, 0
-; GFX1010-NEXT:    s_cmp_lg_u32 s14, 0
 ; GFX1010-NEXT:    s_subb_u32 s11, s11, 0
 ; GFX1010-NEXT:    s_cmp_ge_u32 s11, s7
 ; GFX1010-NEXT:    s_cselect_b32 s14, -1, 0
@@ -2637,44 +2610,40 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1030W32-NEXT:    s_add_u32 s11, s12, s11
 ; GFX1030W32-NEXT:    s_addc_u32 s12, 0, s13
 ; GFX1030W32-NEXT:    s_add_i32 s8, s8, s11
-; GFX1030W32-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s9, s8
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX1030W32-NEXT:    s_mul_i32 s11, s9, s8
 ; GFX1030W32-NEXT:    s_addc_u32 s7, s7, s12
-; GFX1030W32-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1030W32-NEXT:    s_mul_hi_u32 s11, s9, s8
+; GFX1030W32-NEXT:    s_mul_i32 s12, s9, s8
 ; GFX1030W32-NEXT:    s_mul_i32 s9, s9, s7
-; GFX1030W32-NEXT:    s_mul_hi_u32 s12, s8, s11
-; GFX1030W32-NEXT:    s_add_i32 s9, s13, s9
-; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s7, s11
+; GFX1030W32-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1030W32-NEXT:    s_add_i32 s9, s11, s9
+; GFX1030W32-NEXT:    s_mul_i32 s11, s7, s12
 ; GFX1030W32-NEXT:    s_add_i32 s9, s9, s10
-; GFX1030W32-NEXT:    s_mul_i32 s10, s7, s11
+; GFX1030W32-NEXT:    s_mul_hi_u32 s10, s8, s12
 ; GFX1030W32-NEXT:    s_mul_i32 s15, s8, s9
 ; GFX1030W32-NEXT:    s_mul_hi_u32 s14, s8, s9
-; GFX1030W32-NEXT:    s_add_u32 s12, s12, s15
+; GFX1030W32-NEXT:    s_add_u32 s10, s10, s15
+; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s7, s12
 ; GFX1030W32-NEXT:    s_addc_u32 s14, 0, s14
-; GFX1030W32-NEXT:    s_mul_hi_u32 s11, s7, s9
-; GFX1030W32-NEXT:    s_add_u32 s10, s12, s10
+; GFX1030W32-NEXT:    s_mul_hi_u32 s12, s7, s9
+; GFX1030W32-NEXT:    s_add_u32 s10, s10, s11
 ; GFX1030W32-NEXT:    s_mul_i32 s9, s7, s9
 ; GFX1030W32-NEXT:    s_addc_u32 s10, s14, s13
-; GFX1030W32-NEXT:    s_addc_u32 s11, s11, 0
+; GFX1030W32-NEXT:    s_addc_u32 s11, s12, 0
 ; GFX1030W32-NEXT:    s_add_u32 s9, s10, s9
 ; GFX1030W32-NEXT:    s_addc_u32 s10, 0, s11
 ; GFX1030W32-NEXT:    s_add_i32 s8, s8, s9
-; GFX1030W32-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s11, s2, s8
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s9, 0
-; GFX1030W32-NEXT:    s_mul_hi_u32 s9, s3, s8
 ; GFX1030W32-NEXT:    s_addc_u32 s7, s7, s10
-; GFX1030W32-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1030W32-NEXT:    s_mul_hi_u32 s9, s2, s8
 ; GFX1030W32-NEXT:    s_mul_i32 s12, s2, s7
-; GFX1030W32-NEXT:    s_mul_hi_u32 s10, s2, s7
-; GFX1030W32-NEXT:    s_add_u32 s11, s11, s12
-; GFX1030W32-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1030W32-NEXT:    s_mul_hi_u32 s11, s2, s7
+; GFX1030W32-NEXT:    s_mul_hi_u32 s10, s3, s8
+; GFX1030W32-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1030W32-NEXT:    s_add_u32 s9, s9, s12
+; GFX1030W32-NEXT:    s_addc_u32 s11, 0, s11
 ; GFX1030W32-NEXT:    s_mul_hi_u32 s13, s3, s7
-; GFX1030W32-NEXT:    s_add_u32 s8, s11, s8
+; GFX1030W32-NEXT:    s_add_u32 s8, s9, s8
 ; GFX1030W32-NEXT:    s_mul_i32 s7, s3, s7
-; GFX1030W32-NEXT:    s_addc_u32 s8, s10, s9
+; GFX1030W32-NEXT:    s_addc_u32 s8, s11, s10
 ; GFX1030W32-NEXT:    s_addc_u32 s9, s13, 0
 ; GFX1030W32-NEXT:    s_add_u32 s7, s8, s7
 ; GFX1030W32-NEXT:    s_addc_u32 s8, 0, s9
@@ -2687,11 +2656,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1030W32-NEXT:    s_sub_i32 s11, s3, s9
 ; GFX1030W32-NEXT:    s_sub_i32 s10, s2, s10
 ; GFX1030W32-NEXT:    s_cselect_b32 s12, 1, 0
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s12, 0
 ; GFX1030W32-NEXT:    s_subb_u32 s11, s11, s5
 ; GFX1030W32-NEXT:    s_sub_i32 s13, s10, s4
-; GFX1030W32-NEXT:    s_cselect_b32 s14, 1, 0
-; GFX1030W32-NEXT:    s_cmp_lg_u32 s14, 0
 ; GFX1030W32-NEXT:    s_subb_u32 s11, s11, 0
 ; GFX1030W32-NEXT:    s_cmp_ge_u32 s11, s5
 ; GFX1030W32-NEXT:    s_cselect_b32 s14, -1, 0
@@ -2764,8 +2730,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1030W64-NEXT:  ; %bb.1:
 ; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v0, s4
 ; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GFX1030W64-NEXT:    s_sub_u32 s9, 0, s4
-; GFX1030W64-NEXT:    s_subb_u32 s10, 0, s5
+; GFX1030W64-NEXT:    s_sub_u32 s8, 0, s4
+; GFX1030W64-NEXT:    s_subb_u32 s9, 0, s5
 ; GFX1030W64-NEXT:    v_fmamk_f32 v0, v1, 0x4f800000, v0
 ; GFX1030W64-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX1030W64-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2774,109 +2740,102 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1030W64-NEXT:    v_fmamk_f32 v0, v1, 0xcf800000, v0
 ; GFX1030W64-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX1030W64-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1030W64-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX1030W64-NEXT:    v_readfirstlane_b32 s6, v0
-; GFX1030W64-NEXT:    s_mul_i32 s7, s9, s8
-; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s9, s6
-; GFX1030W64-NEXT:    s_mul_i32 s11, s10, s6
-; GFX1030W64-NEXT:    s_add_i32 s7, s12, s7
-; GFX1030W64-NEXT:    s_mul_i32 s13, s9, s6
-; GFX1030W64-NEXT:    s_add_i32 s7, s7, s11
-; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s6, s13
-; GFX1030W64-NEXT:    s_mul_i32 s15, s6, s7
-; GFX1030W64-NEXT:    s_mul_hi_u32 s14, s8, s13
-; GFX1030W64-NEXT:    s_mul_i32 s11, s8, s13
-; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s6, s7
+; GFX1030W64-NEXT:    v_readfirstlane_b32 s6, v1
+; GFX1030W64-NEXT:    v_readfirstlane_b32 s7, v0
+; GFX1030W64-NEXT:    s_mul_i32 s10, s8, s6
+; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s8, s7
+; GFX1030W64-NEXT:    s_mul_i32 s11, s9, s7
+; GFX1030W64-NEXT:    s_add_i32 s10, s12, s10
+; GFX1030W64-NEXT:    s_mul_i32 s13, s8, s7
+; GFX1030W64-NEXT:    s_add_i32 s10, s10, s11
+; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s7, s13
+; GFX1030W64-NEXT:    s_mul_i32 s15, s7, s10
+; GFX1030W64-NEXT:    s_mul_hi_u32 s14, s6, s13
+; GFX1030W64-NEXT:    s_mul_i32 s11, s6, s13
+; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s7, s10
 ; GFX1030W64-NEXT:    s_add_u32 s12, s12, s15
 ; GFX1030W64-NEXT:    s_addc_u32 s13, 0, s13
-; GFX1030W64-NEXT:    s_mul_hi_u32 s16, s8, s7
+; GFX1030W64-NEXT:    s_mul_hi_u32 s16, s6, s10
 ; GFX1030W64-NEXT:    s_add_u32 s11, s12, s11
-; GFX1030W64-NEXT:    s_mul_i32 s7, s8, s7
+; GFX1030W64-NEXT:    s_mul_i32 s10, s6, s10
 ; GFX1030W64-NEXT:    s_addc_u32 s11, s13, s14
 ; GFX1030W64-NEXT:    s_addc_u32 s12, s16, 0
-; GFX1030W64-NEXT:    s_add_u32 s7, s11, s7
+; GFX1030W64-NEXT:    s_add_u32 s10, s11, s10
 ; GFX1030W64-NEXT:    s_addc_u32 s11, 0, s12
-; GFX1030W64-NEXT:    s_add_i32 s12, s6, s7
-; GFX1030W64-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s9, s12
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT:    s_mul_i32 s6, s9, s12
-; GFX1030W64-NEXT:    s_addc_u32 s8, s8, s11
-; GFX1030W64-NEXT:    s_mul_i32 s10, s10, s12
-; GFX1030W64-NEXT:    s_mul_i32 s9, s9, s8
-; GFX1030W64-NEXT:    s_mul_hi_u32 s7, s12, s6
-; GFX1030W64-NEXT:    s_add_i32 s9, s13, s9
-; GFX1030W64-NEXT:    s_mul_hi_u32 s11, s8, s6
-; GFX1030W64-NEXT:    s_add_i32 s9, s9, s10
-; GFX1030W64-NEXT:    s_mul_i32 s6, s8, s6
-; GFX1030W64-NEXT:    s_mul_i32 s14, s12, s9
-; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s12, s9
-; GFX1030W64-NEXT:    s_add_u32 s7, s7, s14
+; GFX1030W64-NEXT:    s_add_i32 s7, s7, s10
+; GFX1030W64-NEXT:    s_addc_u32 s6, s6, s11
+; GFX1030W64-NEXT:    s_mul_hi_u32 s10, s8, s7
+; GFX1030W64-NEXT:    s_mul_i32 s11, s8, s7
+; GFX1030W64-NEXT:    s_mul_i32 s8, s8, s6
+; GFX1030W64-NEXT:    s_mul_i32 s9, s9, s7
+; GFX1030W64-NEXT:    s_add_i32 s8, s10, s8
+; GFX1030W64-NEXT:    s_mul_i32 s10, s6, s11
+; GFX1030W64-NEXT:    s_add_i32 s8, s8, s9
+; GFX1030W64-NEXT:    s_mul_hi_u32 s9, s7, s11
+; GFX1030W64-NEXT:    s_mul_i32 s14, s7, s8
+; GFX1030W64-NEXT:    s_mul_hi_u32 s13, s7, s8
+; GFX1030W64-NEXT:    s_add_u32 s9, s9, s14
+; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s6, s11
 ; GFX1030W64-NEXT:    s_addc_u32 s13, 0, s13
-; GFX1030W64-NEXT:    s_mul_hi_u32 s10, s8, s9
-; GFX1030W64-NEXT:    s_add_u32 s6, s7, s6
-; GFX1030W64-NEXT:    s_mul_i32 s9, s8, s9
-; GFX1030W64-NEXT:    s_addc_u32 s6, s13, s11
-; GFX1030W64-NEXT:    s_addc_u32 s7, s10, 0
-; GFX1030W64-NEXT:    s_add_u32 s6, s6, s9
-; GFX1030W64-NEXT:    s_addc_u32 s9, 0, s7
-; GFX1030W64-NEXT:    s_add_i32 s12, s12, s6
-; GFX1030W64-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GFX1030W64-NEXT:    s_mul_hi_u32 s10, s2, s12
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT:    s_mul_hi_u32 s6, s3, s12
-; GFX1030W64-NEXT:    s_addc_u32 s7, s8, s9
-; GFX1030W64-NEXT:    s_mul_i32 s8, s3, s12
-; GFX1030W64-NEXT:    s_mul_i32 s11, s2, s7
-; GFX1030W64-NEXT:    s_mul_hi_u32 s9, s2, s7
-; GFX1030W64-NEXT:    s_add_u32 s10, s10, s11
-; GFX1030W64-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s3, s7
-; GFX1030W64-NEXT:    s_add_u32 s8, s10, s8
+; GFX1030W64-NEXT:    s_mul_hi_u32 s11, s6, s8
+; GFX1030W64-NEXT:    s_add_u32 s9, s9, s10
+; GFX1030W64-NEXT:    s_mul_i32 s8, s6, s8
+; GFX1030W64-NEXT:    s_addc_u32 s9, s13, s12
+; GFX1030W64-NEXT:    s_addc_u32 s10, s11, 0
+; GFX1030W64-NEXT:    s_add_u32 s8, s9, s8
+; GFX1030W64-NEXT:    s_addc_u32 s9, 0, s10
+; GFX1030W64-NEXT:    s_add_i32 s7, s7, s8
+; GFX1030W64-NEXT:    s_addc_u32 s6, s6, s9
+; GFX1030W64-NEXT:    s_mul_hi_u32 s8, s2, s7
+; GFX1030W64-NEXT:    s_mul_i32 s11, s2, s6
+; GFX1030W64-NEXT:    s_mul_hi_u32 s10, s2, s6
+; GFX1030W64-NEXT:    s_mul_hi_u32 s9, s3, s7
 ; GFX1030W64-NEXT:    s_mul_i32 s7, s3, s7
-; GFX1030W64-NEXT:    s_addc_u32 s6, s9, s6
+; GFX1030W64-NEXT:    s_add_u32 s8, s8, s11
+; GFX1030W64-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1030W64-NEXT:    s_mul_hi_u32 s12, s3, s6
+; GFX1030W64-NEXT:    s_add_u32 s7, s8, s7
+; GFX1030W64-NEXT:    s_mul_i32 s6, s3, s6
+; GFX1030W64-NEXT:    s_addc_u32 s7, s10, s9
 ; GFX1030W64-NEXT:    s_addc_u32 s8, s12, 0
-; GFX1030W64-NEXT:    s_add_u32 s10, s6, s7
+; GFX1030W64-NEXT:    s_add_u32 s10, s7, s6
 ; GFX1030W64-NEXT:    s_addc_u32 s11, 0, s8
 ; GFX1030W64-NEXT:    s_mul_hi_u32 s6, s4, s10
 ; GFX1030W64-NEXT:    s_mul_i32 s7, s4, s11
 ; GFX1030W64-NEXT:    s_mul_i32 s8, s5, s10
 ; GFX1030W64-NEXT:    s_add_i32 s6, s6, s7
-; GFX1030W64-NEXT:    s_add_i32 s12, s6, s8
+; GFX1030W64-NEXT:    s_add_i32 s8, s6, s8
 ; GFX1030W64-NEXT:    s_mul_i32 s6, s4, s10
-; GFX1030W64-NEXT:    s_sub_i32 s8, s3, s12
-; GFX1030W64-NEXT:    s_sub_i32 s13, s2, s6
+; GFX1030W64-NEXT:    s_sub_i32 s9, s3, s8
+; GFX1030W64-NEXT:    s_sub_i32 s12, s2, s6
 ; GFX1030W64-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT:    s_subb_u32 s14, s8, s5
-; GFX1030W64-NEXT:    s_sub_i32 s15, s13, s4
-; GFX1030W64-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GFX1030W64-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX1030W64-NEXT:    s_subb_u32 s8, s14, 0
-; GFX1030W64-NEXT:    s_cmp_ge_u32 s8, s5
-; GFX1030W64-NEXT:    s_cselect_b32 s9, -1, 0
-; GFX1030W64-NEXT:    s_cmp_ge_u32 s15, s4
+; GFX1030W64-NEXT:    s_subb_u32 s9, s9, s5
+; GFX1030W64-NEXT:    s_sub_i32 s13, s12, s4
+; GFX1030W64-NEXT:    s_subb_u32 s9, s9, 0
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s9, s5
 ; GFX1030W64-NEXT:    s_cselect_b32 s14, -1, 0
-; GFX1030W64-NEXT:    s_cmp_eq_u32 s8, s5
-; GFX1030W64-NEXT:    s_cselect_b32 s8, s14, s9
-; GFX1030W64-NEXT:    s_add_u32 s9, s10, 1
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s13, s4
+; GFX1030W64-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX1030W64-NEXT:    s_cmp_eq_u32 s9, s5
+; GFX1030W64-NEXT:    s_cselect_b32 s9, s13, s14
+; GFX1030W64-NEXT:    s_add_u32 s13, s10, 1
 ; GFX1030W64-NEXT:    s_addc_u32 s14, s11, 0
 ; GFX1030W64-NEXT:    s_add_u32 s15, s10, 2
 ; GFX1030W64-NEXT:    s_addc_u32 s16, s11, 0
-; GFX1030W64-NEXT:    s_cmp_lg_u32 s8, 0
-; GFX1030W64-NEXT:    s_cselect_b32 s15, s15, s9
+; GFX1030W64-NEXT:    s_cmp_lg_u32 s9, 0
+; GFX1030W64-NEXT:    s_cselect_b32 s13, s15, s13
 ; GFX1030W64-NEXT:    s_cselect_b32 s14, s16, s14
 ; GFX1030W64-NEXT:    s_cmp_lg_u64 s[6:7], 0
-; GFX1030W64-NEXT:    s_subb_u32 s3, s3, s12
+; GFX1030W64-NEXT:    s_subb_u32 s3, s3, s8
 ; GFX1030W64-NEXT:    s_cmp_ge_u32 s3, s5
 ; GFX1030W64-NEXT:    s_cselect_b32 s6, -1, 0
-; GFX1030W64-NEXT:    s_cmp_ge_u32 s13, s4
+; GFX1030W64-NEXT:    s_cmp_ge_u32 s12, s4
 ; GFX1030W64-NEXT:    s_cselect_b32 s7, -1, 0
 ; GFX1030W64-NEXT:    s_cmp_eq_u32 s3, s5
 ; GFX1030W64-NEXT:    s_cselect_b32 s3, s7, s6
 ; GFX1030W64-NEXT:    s_cmp_lg_u32 s3, 0
 ; GFX1030W64-NEXT:    s_cselect_b32 s7, s14, s11
-; GFX1030W64-NEXT:    s_cselect_b32 s6, s15, s10
+; GFX1030W64-NEXT:    s_cselect_b32 s6, s13, s10
 ; GFX1030W64-NEXT:    s_cbranch_execnz .LBB16_3
 ; GFX1030W64-NEXT:  .LBB16_2:
 ; GFX1030W64-NEXT:    v_cvt_f32_u32_e32 v0, s4
@@ -2962,44 +2921,40 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX11-NEXT:    s_add_u32 s11, s12, s11
 ; GFX11-NEXT:    s_addc_u32 s12, 0, s13
 ; GFX11-NEXT:    s_add_i32 s8, s8, s11
-; GFX11-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX11-NEXT:    s_mul_hi_u32 s13, s9, s8
-; GFX11-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX11-NEXT:    s_mul_i32 s11, s9, s8
 ; GFX11-NEXT:    s_addc_u32 s7, s7, s12
-; GFX11-NEXT:    s_mul_i32 s10, s10, s8
+; GFX11-NEXT:    s_mul_hi_u32 s11, s9, s8
+; GFX11-NEXT:    s_mul_i32 s12, s9, s8
 ; GFX11-NEXT:    s_mul_i32 s9, s9, s7
-; GFX11-NEXT:    s_mul_hi_u32 s12, s8, s11
-; GFX11-NEXT:    s_add_i32 s9, s13, s9
-; GFX11-NEXT:    s_mul_hi_u32 s13, s7, s11
+; GFX11-NEXT:    s_mul_i32 s10, s10, s8
+; GFX11-NEXT:    s_add_i32 s9, s11, s9
+; GFX11-NEXT:    s_mul_i32 s11, s7, s12
 ; GFX11-NEXT:    s_add_i32 s9, s9, s10
-; GFX11-NEXT:    s_mul_i32 s10, s7, s11
+; GFX11-NEXT:    s_mul_hi_u32 s10, s8, s12
 ; GFX11-NEXT:    s_mul_i32 s15, s8, s9
 ; GFX11-NEXT:    s_mul_hi_u32 s14, s8, s9
-; GFX11-NEXT:    s_add_u32 s12, s12, s15
+; GFX11-NEXT:    s_add_u32 s10, s10, s15
+; GFX11-NEXT:    s_mul_hi_u32 s13, s7, s12
 ; GFX11-NEXT:    s_addc_u32 s14, 0, s14
-; GFX11-NEXT:    s_mul_hi_u32 s11, s7, s9
-; GFX11-NEXT:    s_add_u32 s10, s12, s10
+; GFX11-NEXT:    s_mul_hi_u32 s12, s7, s9
+; GFX11-NEXT:    s_add_u32 s10, s10, s11
 ; GFX11-NEXT:    s_mul_i32 s9, s7, s9
 ; GFX11-NEXT:    s_addc_u32 s10, s14, s13
-; GFX11-NEXT:    s_addc_u32 s11, s11, 0
+; GFX11-NEXT:    s_addc_u32 s11, s12, 0
 ; GFX11-NEXT:    s_add_u32 s9, s10, s9
 ; GFX11-NEXT:    s_addc_u32 s10, 0, s11
 ; GFX11-NEXT:    s_add_i32 s8, s8, s9
-; GFX11-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX11-NEXT:    s_mul_hi_u32 s11, s2, s8
-; GFX11-NEXT:    s_cmp_lg_u32 s9, 0
-; GFX11-NEXT:    s_mul_hi_u32 s9, s3, s8
 ; GFX11-NEXT:    s_addc_u32 s7, s7, s10
-; GFX11-NEXT:    s_mul_i32 s8, s3, s8
+; GFX11-NEXT:    s_mul_hi_u32 s9, s2, s8
 ; GFX11-NEXT:    s_mul_i32 s12, s2, s7
-; GFX11-NEXT:    s_mul_hi_u32 s10, s2, s7
-; GFX11-NEXT:    s_add_u32 s11, s11, s12
-; GFX11-NEXT:    s_addc_u32 s10, 0, s10
+; GFX11-NEXT:    s_mul_hi_u32 s11, s2, s7
+; GFX11-NEXT:    s_mul_hi_u32 s10, s3, s8
+; GFX11-NEXT:    s_mul_i32 s8, s3, s8
+; GFX11-NEXT:    s_add_u32 s9, s9, s12
+; GFX11-NEXT:    s_addc_u32 s11, 0, s11
 ; GFX11-NEXT:    s_mul_hi_u32 s13, s3, s7
-; GFX11-NEXT:    s_add_u32 s8, s11, s8
+; GFX11-NEXT:    s_add_u32 s8, s9, s8
 ; GFX11-NEXT:    s_mul_i32 s7, s3, s7
-; GFX11-NEXT:    s_addc_u32 s8, s10, s9
+; GFX11-NEXT:    s_addc_u32 s8, s11, s10
 ; GFX11-NEXT:    s_addc_u32 s9, s13, 0
 ; GFX11-NEXT:    s_add_u32 s7, s8, s7
 ; GFX11-NEXT:    s_addc_u32 s8, 0, s9
@@ -3009,17 +2964,14 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX11-NEXT:    s_add_i32 s9, s9, s10
 ; GFX11-NEXT:    s_mul_i32 s10, s4, s7
 ; GFX11-NEXT:    s_add_i32 s9, s9, s11
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_sub_i32 s11, s3, s9
 ; GFX11-NEXT:    s_sub_i32 s10, s2, s10
 ; GFX11-NEXT:    s_cselect_b32 s12, 1, 0
-; GFX11-NEXT:    s_cmp_lg_u32 s12, 0
 ; GFX11-NEXT:    s_subb_u32 s11, s11, s5
 ; GFX11-NEXT:    s_sub_i32 s13, s10, s4
-; GFX11-NEXT:    s_cselect_b32 s14, 1, 0
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_cmp_lg_u32 s14, 0
 ; GFX11-NEXT:    s_subb_u32 s11, s11, 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_cmp_ge_u32 s11, s5
 ; GFX11-NEXT:    s_cselect_b32 s14, -1, 0
 ; GFX11-NEXT:    s_cmp_ge_u32 s13, s4
@@ -3129,12 +3081,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1250-NEXT:    s_add_nc_u64 s[12:13], s[6:7], s[12:13]
 ; GFX1250-NEXT:    s_add_co_i32 s8, s8, s12
-; GFX1250-NEXT:    s_cselect_b32 s6, 1, 0
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_cmp_lg_u32 s6, 0
 ; GFX1250-NEXT:    s_add_co_ci_u32 s9, s9, s13
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1250-NEXT:    s_mul_u64 s[10:11], s[10:11], s[8:9]
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1250-NEXT:    s_mul_hi_u32 s13, s8, s11
 ; GFX1250-NEXT:    s_mul_i32 s12, s8, s11
 ; GFX1250-NEXT:    s_mul_hi_u32 s6, s8, s10
@@ -3149,19 +3098,17 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1250-NEXT:    s_add_nc_u64 s[10:11], s[6:7], s[10:11]
 ; GFX1250-NEXT:    s_add_co_i32 s8, s8, s10
-; GFX1250-NEXT:    s_cselect_b32 s10, 1, 0
-; GFX1250-NEXT:    s_mul_hi_u32 s6, s2, s8
-; GFX1250-NEXT:    s_cmp_lg_u32 s10, 0
-; GFX1250-NEXT:    s_mul_hi_u32 s12, s3, s8
 ; GFX1250-NEXT:    s_add_co_ci_u32 s10, s9, s11
-; GFX1250-NEXT:    s_mul_i32 s11, s3, s8
+; GFX1250-NEXT:    s_mul_hi_u32 s6, s2, s8
+; GFX1250-NEXT:    s_mul_hi_u32 s11, s3, s8
+; GFX1250-NEXT:    s_mul_i32 s12, s3, s8
 ; GFX1250-NEXT:    s_mul_hi_u32 s9, s2, s10
 ; GFX1250-NEXT:    s_mul_i32 s8, s2, s10
 ; GFX1250-NEXT:    s_mul_hi_u32 s13, s3, s10
 ; GFX1250-NEXT:    s_add_nc_u64 s[8:9], s[6:7], s[8:9]
 ; GFX1250-NEXT:    s_mul_i32 s10, s3, s10
-; GFX1250-NEXT:    s_add_co_u32 s6, s8, s11
-; GFX1250-NEXT:    s_add_co_ci_u32 s6, s9, s12
+; GFX1250-NEXT:    s_add_co_u32 s6, s8, s12
+; GFX1250-NEXT:    s_add_co_ci_u32 s6, s9, s11
 ; GFX1250-NEXT:    s_add_co_ci_u32 s11, s13, 0
 ; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX1250-NEXT:    s_add_nc_u64 s[8:9], s[6:7], s[10:11]
@@ -3176,10 +3123,8 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GFX1250-NEXT:    s_cmp_lg_u32 s8, 0
 ; GFX1250-NEXT:    s_sub_co_ci_u32 s12, s12, s5
 ; GFX1250-NEXT:    s_sub_co_i32 s13, s6, s4
-; GFX1250-NEXT:    s_cselect_b32 s14, 1, 0
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1250-NEXT:    s_cmp_lg_u32 s14, 0
 ; GFX1250-NEXT:    s_sub_co_ci_u32 s12, s12, 0
+; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX1250-NEXT:    s_cmp_ge_u32 s12, s5
 ; GFX1250-NEXT:    s_cselect_b32 s14, -1, 0
 ; GFX1250-NEXT:    s_cmp_ge_u32 s13, s4
diff --git a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
index eee9715f8de5b..0c0be7abcb6d3 100644
--- a/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
+++ b/llvm/test/CodeGen/AMDGPU/expand-scalar-carry-out-select-user.ll
@@ -12,9 +12,6 @@ define i32 @s_add_co_select_user() {
 ; GFX7-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX7-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX7-NEXT:    s_add_i32 s7, s6, s6
-; GFX7-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX7-NEXT:    s_or_b32 s4, s4, s5
-; GFX7-NEXT:    s_cmp_lg_u32 s4, 0
 ; GFX7-NEXT:    s_addc_u32 s8, s6, 0
 ; GFX7-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX7-NEXT:    s_and_b64 s[4:5], s[4:5], exec
@@ -31,8 +28,6 @@ define i32 @s_add_co_select_user() {
 ; GFX9-NEXT:    s_load_dword s6, s[4:5], 0x0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_add_i32 s7, s6, s6
-; GFX9-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX9-NEXT:    s_addc_u32 s8, s6, 0
 ; GFX9-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX9-NEXT:    s_and_b64 s[4:5], s[4:5], exec
@@ -49,8 +44,6 @@ define i32 @s_add_co_select_user() {
 ; GFX10-NEXT:    s_load_dword s4, s[4:5], 0x0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_add_i32 s5, s4, s4
-; GFX10-NEXT:    s_cselect_b32 s6, 1, 0
-; GFX10-NEXT:    s_cmp_lg_u32 s6, 0
 ; GFX10-NEXT:    s_addc_u32 s6, s4, 0
 ; GFX10-NEXT:    s_cselect_b32 s7, -1, 0
 ; GFX10-NEXT:    s_and_b32 s7, s7, exec_lo
@@ -67,16 +60,13 @@ define i32 @s_add_co_select_user() {
 ; GFX11-NEXT:    s_load_b32 s0, s[0:1], 0x0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_add_i32 s1, s0, s0
-; GFX11-NEXT:    s_cselect_b32 s2, 1, 0
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_cmp_lg_u32 s2, 0
 ; GFX11-NEXT:    s_addc_u32 s2, s0, 0
 ; GFX11-NEXT:    s_cselect_b32 s3, -1, 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_3) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_and_b32 s3, s3, exec_lo
 ; GFX11-NEXT:    s_cselect_b32 s2, s2, 0
 ; GFX11-NEXT:    s_cmp_gt_u32 s0, 31
 ; GFX11-NEXT:    s_cselect_b32 s0, s1, s2
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
 ; GFX11-NEXT:    v_mov_b32_e32 v0, s0
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 bb:
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 1111baca8dce5..efcd11de03de1 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -8,7 +8,6 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    s_load_dwordx2 s[0:1], s[4:5], 0xd
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    s_ashr_i32 s8, s1, 31
 ; GCN-NEXT:    s_add_u32 s0, s0, s8
@@ -17,8 +16,8 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    s_xor_b64 s[10:11], s[0:1], s[8:9]
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s11
-; GCN-NEXT:    s_sub_u32 s12, 0, s10
-; GCN-NEXT:    s_subb_u32 s13, 0, s11
+; GCN-NEXT:    s_sub_u32 s0, 0, s10
+; GCN-NEXT:    s_subb_u32 s1, 0, s11
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -27,95 +26,89 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s12, v0
-; GCN-NEXT:    v_readfirstlane_b32 s14, v1
-; GCN-NEXT:    v_readfirstlane_b32 s0, v0
-; GCN-NEXT:    s_mul_i32 s1, s12, s14
-; GCN-NEXT:    v_readfirstlane_b32 s17, v2
-; GCN-NEXT:    s_mul_i32 s15, s13, s0
-; GCN-NEXT:    s_mul_i32 s16, s12, s0
-; GCN-NEXT:    s_add_i32 s1, s17, s1
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s16
-; GCN-NEXT:    s_add_i32 s1, s1, s15
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s1
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s16
-; GCN-NEXT:    v_readfirstlane_b32 s15, v3
-; GCN-NEXT:    s_mul_i32 s17, s0, s1
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s1
-; GCN-NEXT:    s_add_u32 s15, s15, s17
-; GCN-NEXT:    v_readfirstlane_b32 s17, v0
-; GCN-NEXT:    s_addc_u32 s17, 0, s17
-; GCN-NEXT:    s_mul_i32 s16, s14, s16
-; GCN-NEXT:    v_readfirstlane_b32 s18, v4
-; GCN-NEXT:    s_add_u32 s15, s15, s16
-; GCN-NEXT:    s_addc_u32 s15, s17, s18
-; GCN-NEXT:    v_readfirstlane_b32 s16, v1
-; GCN-NEXT:    s_addc_u32 s16, s16, 0
-; GCN-NEXT:    s_mul_i32 s1, s14, s1
-; GCN-NEXT:    s_add_u32 s1, s15, s1
-; GCN-NEXT:    s_addc_u32 s15, 0, s16
-; GCN-NEXT:    s_add_i32 s16, s0, s1
-; GCN-NEXT:    v_mov_b32_e32 v0, s16
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    v_mul_hi_u32 v0, s12, v0
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_addc_u32 s14, s14, s15
-; GCN-NEXT:    s_mul_i32 s0, s12, s14
-; GCN-NEXT:    v_readfirstlane_b32 s1, v0
-; GCN-NEXT:    s_add_i32 s0, s1, s0
-; GCN-NEXT:    s_mul_i32 s13, s13, s16
-; GCN-NEXT:    s_mul_i32 s1, s12, s16
-; GCN-NEXT:    s_add_i32 s0, s0, s13
-; GCN-NEXT:    v_mov_b32_e32 v2, s1
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mul_hi_u32 v3, s14, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s16, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s14, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s16, v0
-; GCN-NEXT:    s_mul_i32 s13, s16, s0
-; GCN-NEXT:    v_readfirstlane_b32 s17, v2
-; GCN-NEXT:    s_add_u32 s13, s17, s13
-; GCN-NEXT:    v_readfirstlane_b32 s15, v0
-; GCN-NEXT:    s_mul_i32 s1, s14, s1
-; GCN-NEXT:    s_addc_u32 s15, 0, s15
-; GCN-NEXT:    v_readfirstlane_b32 s12, v3
-; GCN-NEXT:    s_add_u32 s1, s13, s1
-; GCN-NEXT:    s_addc_u32 s1, s15, s12
+; GCN-NEXT:    v_mul_hi_u32 v2, s0, v0
 ; GCN-NEXT:    v_readfirstlane_b32 s12, v1
-; GCN-NEXT:    s_addc_u32 s12, s12, 0
-; GCN-NEXT:    s_mul_i32 s0, s14, s0
-; GCN-NEXT:    s_add_u32 s0, s1, s0
-; GCN-NEXT:    s_addc_u32 s12, 0, s12
-; GCN-NEXT:    s_add_i32 s15, s16, s0
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_addc_u32 s14, s14, s12
+; GCN-NEXT:    v_readfirstlane_b32 s2, v0
+; GCN-NEXT:    s_mul_i32 s13, s0, s12
+; GCN-NEXT:    v_readfirstlane_b32 s16, v2
+; GCN-NEXT:    s_mul_i32 s14, s1, s2
+; GCN-NEXT:    s_mul_i32 s15, s0, s2
+; GCN-NEXT:    s_add_i32 s13, s16, s13
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s15
+; GCN-NEXT:    s_add_i32 s13, s13, s14
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s13
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s15
+; GCN-NEXT:    v_readfirstlane_b32 s14, v3
+; GCN-NEXT:    s_mul_i32 s16, s2, s13
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s13
+; GCN-NEXT:    s_add_u32 s14, s14, s16
+; GCN-NEXT:    v_readfirstlane_b32 s16, v0
+; GCN-NEXT:    s_mul_i32 s15, s12, s15
+; GCN-NEXT:    s_addc_u32 s16, 0, s16
+; GCN-NEXT:    v_readfirstlane_b32 s17, v4
+; GCN-NEXT:    s_add_u32 s14, s14, s15
+; GCN-NEXT:    s_addc_u32 s14, s16, s17
+; GCN-NEXT:    v_readfirstlane_b32 s15, v1
+; GCN-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-NEXT:    s_mul_i32 s13, s12, s13
+; GCN-NEXT:    s_add_u32 s13, s14, s13
+; GCN-NEXT:    s_addc_u32 s14, 0, s15
+; GCN-NEXT:    s_add_i32 s13, s2, s13
+; GCN-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-NEXT:    v_mul_hi_u32 v0, s0, v0
+; GCN-NEXT:    s_addc_u32 s12, s12, s14
+; GCN-NEXT:    s_mul_i32 s14, s0, s12
+; GCN-NEXT:    s_mul_i32 s1, s1, s13
+; GCN-NEXT:    v_readfirstlane_b32 s15, v0
+; GCN-NEXT:    s_add_i32 s14, s15, s14
+; GCN-NEXT:    s_mul_i32 s0, s0, s13
+; GCN-NEXT:    s_add_i32 s1, s14, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v3, s12, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s13, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s12, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s13, v0
+; GCN-NEXT:    s_mul_i32 s15, s13, s1
+; GCN-NEXT:    v_readfirstlane_b32 s17, v2
+; GCN-NEXT:    s_add_u32 s15, s17, s15
+; GCN-NEXT:    v_readfirstlane_b32 s16, v0
+; GCN-NEXT:    s_mul_i32 s0, s12, s0
+; GCN-NEXT:    s_addc_u32 s16, 0, s16
+; GCN-NEXT:    v_readfirstlane_b32 s14, v3
+; GCN-NEXT:    s_add_u32 s0, s15, s0
+; GCN-NEXT:    s_addc_u32 s0, s16, s14
+; GCN-NEXT:    v_readfirstlane_b32 s14, v1
+; GCN-NEXT:    s_addc_u32 s14, s14, 0
+; GCN-NEXT:    s_mul_i32 s1, s12, s1
+; GCN-NEXT:    s_add_u32 s0, s0, s1
+; GCN-NEXT:    s_addc_u32 s1, 0, s14
+; GCN-NEXT:    s_add_i32 s14, s13, s0
+; GCN-NEXT:    s_addc_u32 s15, s12, s1
 ; GCN-NEXT:    s_ashr_i32 s12, s7, 31
 ; GCN-NEXT:    s_add_u32 s0, s6, s12
 ; GCN-NEXT:    s_mov_b32 s13, s12
 ; GCN-NEXT:    s_addc_u32 s1, s7, s12
 ; GCN-NEXT:    s_xor_b64 s[6:7], s[0:1], s[12:13]
-; GCN-NEXT:    v_mov_b32_e32 v0, s14
+; GCN-NEXT:    v_mov_b32_e32 v0, s15
 ; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
-; GCN-NEXT:    v_mov_b32_e32 v2, s15
+; GCN-NEXT:    v_mov_b32_e32 v2, s14
 ; GCN-NEXT:    v_mul_hi_u32 v3, s6, v2
 ; GCN-NEXT:    s_mov_b32 s0, s4
 ; GCN-NEXT:    v_readfirstlane_b32 s4, v1
 ; GCN-NEXT:    v_mul_hi_u32 v1, s7, v2
-; GCN-NEXT:    s_mul_i32 s1, s6, s14
+; GCN-NEXT:    s_mul_i32 s1, s6, s15
 ; GCN-NEXT:    v_readfirstlane_b32 s16, v3
 ; GCN-NEXT:    v_mul_hi_u32 v0, s7, v0
 ; GCN-NEXT:    s_add_u32 s1, s16, s1
 ; GCN-NEXT:    s_addc_u32 s4, 0, s4
-; GCN-NEXT:    s_mul_i32 s15, s7, s15
+; GCN-NEXT:    s_mul_i32 s14, s7, s14
 ; GCN-NEXT:    v_readfirstlane_b32 s16, v1
-; GCN-NEXT:    s_add_u32 s1, s1, s15
+; GCN-NEXT:    s_add_u32 s1, s1, s14
 ; GCN-NEXT:    s_addc_u32 s1, s4, s16
 ; GCN-NEXT:    v_readfirstlane_b32 s4, v0
 ; GCN-NEXT:    s_addc_u32 s4, s4, 0
-; GCN-NEXT:    s_mul_i32 s14, s7, s14
+; GCN-NEXT:    s_mul_i32 s14, s7, s15
 ; GCN-NEXT:    s_add_u32 s14, s1, s14
 ; GCN-NEXT:    v_mov_b32_e32 v0, s14
 ; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
@@ -130,43 +123,40 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    s_mul_i32 s4, s10, s14
 ; GCN-NEXT:    s_sub_i32 s6, s6, s4
 ; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_or_b32 s18, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s18, 0
 ; GCN-NEXT:    s_subb_u32 s17, s17, s11
-; GCN-NEXT:    s_sub_i32 s19, s6, s10
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_sub_i32 s18, s6, s10
+; GCN-NEXT:    s_subb_u32 s17, s17, 0
+; GCN-NEXT:    s_cmp_ge_u32 s17, s11
+; GCN-NEXT:    s_cselect_b32 s19, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s18, s10
+; GCN-NEXT:    s_cselect_b32 s18, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s17, s11
+; GCN-NEXT:    s_cselect_b32 s17, s18, s19
+; GCN-NEXT:    s_add_u32 s18, s14, 1
+; GCN-NEXT:    s_addc_u32 s19, s15, 0
+; GCN-NEXT:    s_add_u32 s20, s14, 2
+; GCN-NEXT:    s_addc_u32 s21, s15, 0
+; GCN-NEXT:    s_cmp_lg_u32 s17, 0
+; GCN-NEXT:    s_cselect_b32 s17, s20, s18
+; GCN-NEXT:    s_cselect_b32 s18, s21, s19
 ; GCN-NEXT:    s_or_b32 s4, s4, s5
 ; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_subb_u32 s4, s17, 0
+; GCN-NEXT:    s_subb_u32 s4, s7, s16
 ; GCN-NEXT:    s_cmp_ge_u32 s4, s11
 ; GCN-NEXT:    s_cselect_b32 s5, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s19, s10
-; GCN-NEXT:    s_cselect_b32 s17, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s4, s11
-; GCN-NEXT:    s_cselect_b32 s4, s17, s5
-; GCN-NEXT:    s_add_u32 s5, s14, 1
-; GCN-NEXT:    s_addc_u32 s17, s15, 0
-; GCN-NEXT:    s_add_u32 s19, s14, 2
-; GCN-NEXT:    s_addc_u32 s20, s15, 0
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_cselect_b32 s4, s19, s5
-; GCN-NEXT:    s_cselect_b32 s5, s20, s17
-; GCN-NEXT:    s_cmp_lg_u32 s18, 0
-; GCN-NEXT:    s_subb_u32 s7, s7, s16
-; GCN-NEXT:    s_cmp_ge_u32 s7, s11
-; GCN-NEXT:    s_cselect_b32 s16, -1, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s6, s10
 ; GCN-NEXT:    s_cselect_b32 s6, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s7, s11
-; GCN-NEXT:    s_cselect_b32 s6, s6, s16
-; GCN-NEXT:    s_cmp_lg_u32 s6, 0
-; GCN-NEXT:    s_cselect_b32 s5, s5, s15
-; GCN-NEXT:    s_cselect_b32 s4, s4, s14
+; GCN-NEXT:    s_cmp_eq_u32 s4, s11
+; GCN-NEXT:    s_cselect_b32 s4, s6, s5
+; GCN-NEXT:    s_cmp_lg_u32 s4, 0
+; GCN-NEXT:    s_cselect_b32 s5, s18, s15
+; GCN-NEXT:    s_cselect_b32 s4, s17, s14
 ; GCN-NEXT:    s_xor_b64 s[6:7], s[12:13], s[8:9]
 ; GCN-NEXT:    s_xor_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-NEXT:    s_sub_u32 s4, s4, s6
 ; GCN-NEXT:    s_subb_u32 s5, s5, s7
 ; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm
@@ -205,9 +195,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s18, s16, 1
-; GCN-IR-NEXT:    s_addc_u32 s19, s17, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT:    s_add_i32 s18, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s10, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s16, 63, s16
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s16
@@ -217,9 +207,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_add_u32 s18, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s3, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
-; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s8, s20
+; GCN-IR-NEXT:    s_addc_u32 s15, s9, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -227,19 +217,19 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_lshr_b32 s8, s11, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
 ; GCN-IR-NEXT:    s_sub_u32 s8, s18, s16
 ; GCN-IR-NEXT:    s_subb_u32 s8, s19, s17
-; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s16, s16, s14
-; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
-; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_ashr_i32 s12, s8, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s8, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s16, s16, s20
+; GCN-IR-NEXT:    s_subb_u32 s17, s17, s21
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -1152,8 +1142,7 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s7
 ; GCN-NEXT:    s_sub_u32 s2, 0, s6
-; GCN-NEXT:    s_subb_u32 s10, 0, s7
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_subb_u32 s8, 0, s7
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -1163,74 +1152,68 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_mul_hi_u32 v2, s2, v0
-; GCN-NEXT:    v_readfirstlane_b32 s11, v1
-; GCN-NEXT:    v_readfirstlane_b32 s8, v0
-; GCN-NEXT:    s_mul_i32 s9, s2, s11
-; GCN-NEXT:    v_readfirstlane_b32 s14, v2
-; GCN-NEXT:    s_mul_i32 s12, s10, s8
-; GCN-NEXT:    s_mul_i32 s13, s2, s8
-; GCN-NEXT:    s_add_i32 s9, s14, s9
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s13
-; GCN-NEXT:    s_add_i32 s9, s9, s12
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s9
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s13
-; GCN-NEXT:    v_readfirstlane_b32 s12, v3
-; GCN-NEXT:    s_mul_i32 s15, s8, s9
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s9
-; GCN-NEXT:    s_add_u32 s12, s12, s15
-; GCN-NEXT:    v_readfirstlane_b32 s15, v0
-; GCN-NEXT:    s_mul_i32 s13, s11, s13
-; GCN-NEXT:    s_addc_u32 s15, 0, s15
-; GCN-NEXT:    v_readfirstlane_b32 s14, v4
-; GCN-NEXT:    s_add_u32 s12, s12, s13
-; GCN-NEXT:    s_addc_u32 s12, s15, s14
-; GCN-NEXT:    v_readfirstlane_b32 s13, v1
-; GCN-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-NEXT:    s_mul_i32 s9, s11, s9
-; GCN-NEXT:    s_add_u32 s9, s12, s9
-; GCN-NEXT:    s_addc_u32 s12, 0, s13
-; GCN-NEXT:    s_add_i32 s13, s8, s9
-; GCN-NEXT:    v_mov_b32_e32 v0, s13
-; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    v_readfirstlane_b32 s3, v0
+; GCN-NEXT:    s_mul_i32 s10, s2, s9
+; GCN-NEXT:    v_readfirstlane_b32 s13, v2
+; GCN-NEXT:    s_mul_i32 s11, s8, s3
+; GCN-NEXT:    s_mul_i32 s12, s2, s3
+; GCN-NEXT:    s_add_i32 s10, s13, s10
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s12
+; GCN-NEXT:    s_add_i32 s10, s10, s11
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s10
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s12
+; GCN-NEXT:    v_readfirstlane_b32 s11, v3
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s10
+; GCN-NEXT:    s_mul_i32 s14, s3, s10
+; GCN-NEXT:    s_add_u32 s11, s11, s14
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_mul_i32 s12, s9, s12
+; GCN-NEXT:    s_addc_u32 s14, 0, s14
+; GCN-NEXT:    v_readfirstlane_b32 s13, v4
+; GCN-NEXT:    s_add_u32 s11, s11, s12
+; GCN-NEXT:    v_readfirstlane_b32 s15, v1
+; GCN-NEXT:    s_addc_u32 s11, s14, s13
+; GCN-NEXT:    s_addc_u32 s12, s15, 0
+; GCN-NEXT:    s_mul_i32 s10, s9, s10
+; GCN-NEXT:    s_add_u32 s10, s11, s10
+; GCN-NEXT:    s_addc_u32 s11, 0, s12
+; GCN-NEXT:    s_add_i32 s10, s3, s10
+; GCN-NEXT:    v_mov_b32_e32 v0, s10
 ; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
-; GCN-NEXT:    s_or_b32 s8, s8, s9
-; GCN-NEXT:    s_cmp_lg_u32 s8, 0
-; GCN-NEXT:    s_addc_u32 s11, s11, s12
-; GCN-NEXT:    s_mul_i32 s8, s2, s11
-; GCN-NEXT:    v_readfirstlane_b32 s9, v0
-; GCN-NEXT:    s_add_i32 s8, s9, s8
-; GCN-NEXT:    s_mul_i32 s10, s10, s13
-; GCN-NEXT:    s_mul_i32 s2, s2, s13
-; GCN-NEXT:    s_add_i32 s8, s8, s10
+; GCN-NEXT:    s_addc_u32 s9, s9, s11
+; GCN-NEXT:    s_mul_i32 s11, s2, s9
+; GCN-NEXT:    s_mul_i32 s8, s8, s10
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_add_i32 s11, s12, s11
+; GCN-NEXT:    s_mul_i32 s2, s2, s10
+; GCN-NEXT:    s_add_i32 s8, s11, s8
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
 ; GCN-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-NEXT:    v_mul_hi_u32 v3, s11, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s13, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s11, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s13, v0
-; GCN-NEXT:    s_mul_i32 s10, s13, s8
+; GCN-NEXT:    v_mul_hi_u32 v3, s9, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s10, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s9, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
+; GCN-NEXT:    s_mul_i32 s12, s10, s8
 ; GCN-NEXT:    v_readfirstlane_b32 s14, v2
-; GCN-NEXT:    s_add_u32 s10, s14, s10
-; GCN-NEXT:    v_readfirstlane_b32 s12, v0
-; GCN-NEXT:    s_mul_i32 s2, s11, s2
-; GCN-NEXT:    s_addc_u32 s12, 0, s12
-; GCN-NEXT:    v_readfirstlane_b32 s9, v3
-; GCN-NEXT:    s_add_u32 s2, s10, s2
-; GCN-NEXT:    s_addc_u32 s2, s12, s9
-; GCN-NEXT:    v_readfirstlane_b32 s9, v1
-; GCN-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-NEXT:    s_mul_i32 s8, s11, s8
+; GCN-NEXT:    s_add_u32 s12, s14, s12
+; GCN-NEXT:    v_readfirstlane_b32 s13, v0
+; GCN-NEXT:    s_mul_i32 s2, s9, s2
+; GCN-NEXT:    s_addc_u32 s13, 0, s13
+; GCN-NEXT:    v_readfirstlane_b32 s11, v3
+; GCN-NEXT:    s_add_u32 s2, s12, s2
+; GCN-NEXT:    s_addc_u32 s2, s13, s11
+; GCN-NEXT:    v_readfirstlane_b32 s11, v1
+; GCN-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-NEXT:    s_mul_i32 s8, s9, s8
 ; GCN-NEXT:    s_add_u32 s2, s2, s8
-; GCN-NEXT:    s_addc_u32 s10, 0, s9
-; GCN-NEXT:    s_add_i32 s13, s13, s2
-; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GCN-NEXT:    s_or_b32 s2, s8, s9
-; GCN-NEXT:    s_cmp_lg_u32 s2, 0
-; GCN-NEXT:    s_addc_u32 s8, s11, s10
-; GCN-NEXT:    v_mul_hi_u32 v1, s13, 24
+; GCN-NEXT:    s_addc_u32 s8, 0, s11
+; GCN-NEXT:    s_add_i32 s10, s10, s2
+; GCN-NEXT:    s_addc_u32 s8, s9, s8
+; GCN-NEXT:    v_mul_hi_u32 v1, s10, 24
 ; GCN-NEXT:    v_mul_hi_u32 v0, s8, 24
 ; GCN-NEXT:    s_mul_i32 s8, s8, 24
-; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    v_readfirstlane_b32 s10, v1
 ; GCN-NEXT:    v_readfirstlane_b32 s9, v0
 ; GCN-NEXT:    s_add_u32 s8, s10, s8
@@ -1238,44 +1221,41 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_mov_b32_e32 v0, s10
 ; GCN-NEXT:    v_mul_hi_u32 v0, s6, v0
 ; GCN-NEXT:    s_mul_i32 s8, s7, s10
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_readfirstlane_b32 s9, v0
 ; GCN-NEXT:    s_add_i32 s11, s9, s8
 ; GCN-NEXT:    s_sub_i32 s12, 0, s11
 ; GCN-NEXT:    s_mul_i32 s8, s6, s10
 ; GCN-NEXT:    s_sub_i32 s13, 24, s8
 ; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GCN-NEXT:    s_or_b32 s14, s8, s9
-; GCN-NEXT:    s_cmp_lg_u32 s14, 0
 ; GCN-NEXT:    s_subb_u32 s12, s12, s7
-; GCN-NEXT:    s_sub_i32 s15, s13, s6
-; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    s_sub_i32 s14, s13, s6
+; GCN-NEXT:    s_subb_u32 s12, s12, 0
+; GCN-NEXT:    s_cmp_ge_u32 s12, s7
+; GCN-NEXT:    s_cselect_b32 s15, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s14, s6
+; GCN-NEXT:    s_cselect_b32 s14, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s12, s7
+; GCN-NEXT:    s_cselect_b32 s12, s14, s15
+; GCN-NEXT:    s_add_u32 s14, s10, 1
+; GCN-NEXT:    s_addc_u32 s15, 0, 0
+; GCN-NEXT:    s_add_u32 s16, s10, 2
+; GCN-NEXT:    s_addc_u32 s17, 0, 0
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_cselect_b32 s12, s16, s14
+; GCN-NEXT:    s_cselect_b32 s14, s17, s15
 ; GCN-NEXT:    s_or_b32 s8, s8, s9
 ; GCN-NEXT:    s_cmp_lg_u32 s8, 0
-; GCN-NEXT:    s_subb_u32 s8, s12, 0
+; GCN-NEXT:    s_subb_u32 s8, 0, s11
 ; GCN-NEXT:    s_cmp_ge_u32 s8, s7
 ; GCN-NEXT:    s_cselect_b32 s9, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s15, s6
-; GCN-NEXT:    s_cselect_b32 s12, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s8, s7
-; GCN-NEXT:    s_cselect_b32 s8, s12, s9
-; GCN-NEXT:    s_add_u32 s9, s10, 1
-; GCN-NEXT:    s_addc_u32 s12, 0, 0
-; GCN-NEXT:    s_add_u32 s15, s10, 2
-; GCN-NEXT:    s_addc_u32 s16, 0, 0
-; GCN-NEXT:    s_cmp_lg_u32 s8, 0
-; GCN-NEXT:    s_cselect_b32 s8, s15, s9
-; GCN-NEXT:    s_cselect_b32 s9, s16, s12
-; GCN-NEXT:    s_cmp_lg_u32 s14, 0
-; GCN-NEXT:    s_subb_u32 s11, 0, s11
-; GCN-NEXT:    s_cmp_ge_u32 s11, s7
-; GCN-NEXT:    s_cselect_b32 s12, -1, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s13, s6
 ; GCN-NEXT:    s_cselect_b32 s6, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s11, s7
-; GCN-NEXT:    s_cselect_b32 s6, s6, s12
+; GCN-NEXT:    s_cmp_eq_u32 s8, s7
+; GCN-NEXT:    s_cselect_b32 s6, s6, s9
 ; GCN-NEXT:    s_cmp_lg_u32 s6, 0
-; GCN-NEXT:    s_cselect_b32 s7, s9, 0
-; GCN-NEXT:    s_cselect_b32 s6, s8, s10
+; GCN-NEXT:    s_cselect_b32 s7, s14, 0
+; GCN-NEXT:    s_cselect_b32 s6, s12, s10
 ; GCN-NEXT:    s_xor_b64 s[6:7], s[6:7], s[4:5]
 ; GCN-NEXT:    s_sub_u32 s6, s6, s4
 ; GCN-NEXT:    s_subb_u32 s7, s7, s4
@@ -1294,34 +1274,34 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
 ; GCN-IR-NEXT:    s_sub_u32 s2, s2, s4
 ; GCN-IR-NEXT:    s_subb_u32 s3, s3, s4
-; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[2:3]
-; GCN-IR-NEXT:    s_add_u32 s10, s14, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s16, s[2:3]
+; GCN-IR-NEXT:    s_add_u32 s10, s16, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[12:13]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[12:13], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[12:13], 0
+; GCN-IR-NEXT:    s_add_i32 s12, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s8, s11, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s10
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s12
-; GCN-IR-NEXT:    s_add_u32 s16, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s17, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s10, 58, s14
-; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s16, 58, s16
+; GCN-IR-NEXT:    s_subb_u32 s17, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1329,19 +1309,19 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s6, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s6, s16, s12
-; GCN-IR-NEXT:    s_subb_u32 s6, s17, s13
-; GCN-IR-NEXT:    s_ashr_i32 s14, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s6, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s6, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s6, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s10, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_and_b32 s6, s10, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[10:11], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT:    s_add_i32 s16, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow6
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index fce960038444a..e7b7a161de85e 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -1513,7 +1513,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
 ; GCN-NEXT:    s_sub_u32 s3, 0, s8
-; GCN-NEXT:    s_subb_u32 s12, 0, s9
+; GCN-NEXT:    s_subb_u32 s10, 0, s9
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -1522,75 +1522,71 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s13, v1
-; GCN-NEXT:    v_readfirstlane_b32 s10, v0
-; GCN-NEXT:    s_mul_i32 s11, s3, s13
-; GCN-NEXT:    s_mul_hi_u32 s15, s3, s10
-; GCN-NEXT:    s_mul_i32 s14, s12, s10
-; GCN-NEXT:    s_add_i32 s11, s15, s11
-; GCN-NEXT:    s_add_i32 s11, s11, s14
-; GCN-NEXT:    s_mul_i32 s16, s3, s10
-; GCN-NEXT:    s_mul_i32 s15, s10, s11
-; GCN-NEXT:    s_mul_hi_u32 s17, s10, s16
-; GCN-NEXT:    s_mul_hi_u32 s14, s10, s11
+; GCN-NEXT:    v_readfirstlane_b32 s11, v1
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_mul_i32 s13, s3, s11
+; GCN-NEXT:    s_mul_hi_u32 s15, s3, s12
+; GCN-NEXT:    s_mul_i32 s14, s10, s12
+; GCN-NEXT:    s_add_i32 s13, s15, s13
+; GCN-NEXT:    s_add_i32 s13, s13, s14
+; GCN-NEXT:    s_mul_i32 s16, s3, s12
+; GCN-NEXT:    s_mul_i32 s15, s12, s13
+; GCN-NEXT:    s_mul_hi_u32 s17, s12, s16
+; GCN-NEXT:    s_mul_hi_u32 s14, s12, s13
 ; GCN-NEXT:    s_add_u32 s15, s17, s15
 ; GCN-NEXT:    s_addc_u32 s14, 0, s14
-; GCN-NEXT:    s_mul_hi_u32 s18, s13, s16
-; GCN-NEXT:    s_mul_i32 s16, s13, s16
+; GCN-NEXT:    s_mul_hi_u32 s18, s11, s16
+; GCN-NEXT:    s_mul_i32 s16, s11, s16
 ; GCN-NEXT:    s_add_u32 s15, s15, s16
-; GCN-NEXT:    s_mul_hi_u32 s17, s13, s11
+; GCN-NEXT:    s_mul_hi_u32 s17, s11, s13
 ; GCN-NEXT:    s_addc_u32 s14, s14, s18
 ; GCN-NEXT:    s_addc_u32 s15, s17, 0
-; GCN-NEXT:    s_mul_i32 s11, s13, s11
-; GCN-NEXT:    s_add_u32 s11, s14, s11
+; GCN-NEXT:    s_mul_i32 s13, s11, s13
+; GCN-NEXT:    s_add_u32 s13, s14, s13
 ; GCN-NEXT:    s_addc_u32 s14, 0, s15
-; GCN-NEXT:    s_add_i32 s15, s10, s11
-; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GCN-NEXT:    s_addc_u32 s13, s13, s14
-; GCN-NEXT:    s_mul_i32 s10, s3, s13
-; GCN-NEXT:    s_mul_hi_u32 s11, s3, s15
-; GCN-NEXT:    s_add_i32 s10, s11, s10
-; GCN-NEXT:    s_mul_i32 s12, s12, s15
-; GCN-NEXT:    s_add_i32 s10, s10, s12
-; GCN-NEXT:    s_mul_i32 s3, s3, s15
-; GCN-NEXT:    s_mul_hi_u32 s12, s13, s3
-; GCN-NEXT:    s_mul_i32 s14, s13, s3
-; GCN-NEXT:    s_mul_i32 s17, s15, s10
-; GCN-NEXT:    s_mul_hi_u32 s3, s15, s3
-; GCN-NEXT:    s_mul_hi_u32 s16, s15, s10
+; GCN-NEXT:    s_add_i32 s12, s12, s13
+; GCN-NEXT:    s_addc_u32 s11, s11, s14
+; GCN-NEXT:    s_mul_i32 s13, s3, s11
+; GCN-NEXT:    s_mul_hi_u32 s14, s3, s12
+; GCN-NEXT:    s_add_i32 s13, s14, s13
+; GCN-NEXT:    s_mul_i32 s10, s10, s12
+; GCN-NEXT:    s_add_i32 s13, s13, s10
+; GCN-NEXT:    s_mul_i32 s3, s3, s12
+; GCN-NEXT:    s_mul_hi_u32 s14, s11, s3
+; GCN-NEXT:    s_mul_i32 s15, s11, s3
+; GCN-NEXT:    s_mul_i32 s17, s12, s13
+; GCN-NEXT:    s_mul_hi_u32 s3, s12, s3
+; GCN-NEXT:    s_mul_hi_u32 s16, s12, s13
 ; GCN-NEXT:    s_add_u32 s3, s3, s17
 ; GCN-NEXT:    s_addc_u32 s16, 0, s16
-; GCN-NEXT:    s_add_u32 s3, s3, s14
-; GCN-NEXT:    s_mul_hi_u32 s11, s13, s10
-; GCN-NEXT:    s_addc_u32 s3, s16, s12
-; GCN-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-NEXT:    s_mul_i32 s10, s13, s10
-; GCN-NEXT:    s_add_u32 s3, s3, s10
-; GCN-NEXT:    s_addc_u32 s12, 0, s11
-; GCN-NEXT:    s_add_i32 s15, s15, s3
-; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[10:11], 0
-; GCN-NEXT:    s_addc_u32 s3, s13, s12
+; GCN-NEXT:    s_add_u32 s3, s3, s15
+; GCN-NEXT:    s_mul_hi_u32 s10, s11, s13
+; GCN-NEXT:    s_addc_u32 s3, s16, s14
+; GCN-NEXT:    s_addc_u32 s10, s10, 0
+; GCN-NEXT:    s_mul_i32 s13, s11, s13
+; GCN-NEXT:    s_add_u32 s3, s3, s13
+; GCN-NEXT:    s_addc_u32 s10, 0, s10
+; GCN-NEXT:    s_add_i32 s3, s12, s3
+; GCN-NEXT:    s_addc_u32 s14, s11, s10
 ; GCN-NEXT:    s_ashr_i32 s10, s5, 31
 ; GCN-NEXT:    s_add_u32 s12, s4, s10
 ; GCN-NEXT:    s_mov_b32 s11, s10
 ; GCN-NEXT:    s_addc_u32 s13, s5, s10
 ; GCN-NEXT:    s_xor_b64 s[12:13], s[12:13], s[10:11]
-; GCN-NEXT:    s_mul_i32 s14, s12, s3
-; GCN-NEXT:    s_mul_hi_u32 s16, s12, s15
-; GCN-NEXT:    s_mul_hi_u32 s5, s12, s3
-; GCN-NEXT:    s_add_u32 s14, s16, s14
+; GCN-NEXT:    s_mul_i32 s15, s12, s14
+; GCN-NEXT:    s_mul_hi_u32 s16, s12, s3
+; GCN-NEXT:    s_mul_hi_u32 s5, s12, s14
+; GCN-NEXT:    s_add_u32 s15, s16, s15
 ; GCN-NEXT:    s_addc_u32 s5, 0, s5
-; GCN-NEXT:    s_mul_hi_u32 s17, s13, s15
-; GCN-NEXT:    s_mul_i32 s15, s13, s15
-; GCN-NEXT:    s_add_u32 s14, s14, s15
-; GCN-NEXT:    s_mul_hi_u32 s16, s13, s3
-; GCN-NEXT:    s_addc_u32 s5, s5, s17
-; GCN-NEXT:    s_addc_u32 s14, s16, 0
+; GCN-NEXT:    s_mul_hi_u32 s17, s13, s3
 ; GCN-NEXT:    s_mul_i32 s3, s13, s3
-; GCN-NEXT:    s_add_u32 s3, s5, s3
-; GCN-NEXT:    s_addc_u32 s5, 0, s14
+; GCN-NEXT:    s_add_u32 s3, s15, s3
+; GCN-NEXT:    s_mul_hi_u32 s16, s13, s14
+; GCN-NEXT:    s_addc_u32 s3, s5, s17
+; GCN-NEXT:    s_addc_u32 s5, s16, 0
+; GCN-NEXT:    s_mul_i32 s14, s13, s14
+; GCN-NEXT:    s_add_u32 s3, s3, s14
+; GCN-NEXT:    s_addc_u32 s5, 0, s5
 ; GCN-NEXT:    s_mul_i32 s5, s8, s5
 ; GCN-NEXT:    s_mul_hi_u32 s14, s8, s3
 ; GCN-NEXT:    s_add_i32 s5, s14, s5
@@ -1600,11 +1596,9 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GCN-NEXT:    s_mul_i32 s3, s8, s3
 ; GCN-NEXT:    s_sub_i32 s3, s12, s3
 ; GCN-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; GCN-NEXT:    s_subb_u32 s12, s16, s9
 ; GCN-NEXT:    s_sub_i32 s18, s3, s8
 ; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; GCN-NEXT:    s_subb_u32 s19, s12, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s19, s9
 ; GCN-NEXT:    s_cselect_b32 s20, -1, 0
@@ -1614,12 +1608,10 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; GCN-NEXT:    s_cselect_b32 s20, s21, s20
 ; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; GCN-NEXT:    s_subb_u32 s12, s12, s9
-; GCN-NEXT:    s_sub_i32 s21, s18, s8
-; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; GCN-NEXT:    s_sub_i32 s16, s18, s8
 ; GCN-NEXT:    s_subb_u32 s12, s12, 0
 ; GCN-NEXT:    s_cmp_lg_u32 s20, 0
-; GCN-NEXT:    s_cselect_b32 s16, s21, s18
+; GCN-NEXT:    s_cselect_b32 s16, s16, s18
 ; GCN-NEXT:    s_cselect_b32 s12, s12, s19
 ; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; GCN-NEXT:    s_subb_u32 s5, s13, s5
@@ -1931,11 +1923,9 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; TONGA-NEXT:    v_readfirstlane_b32 s14, v0
 ; TONGA-NEXT:    s_sub_i32 s12, s12, s14
 ; TONGA-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; TONGA-NEXT:    s_subb_u32 s3, s3, s7
 ; TONGA-NEXT:    s_sub_i32 s18, s12, s6
 ; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; TONGA-NEXT:    s_subb_u32 s19, s3, 0
 ; TONGA-NEXT:    s_cmp_ge_u32 s19, s7
 ; TONGA-NEXT:    s_cselect_b32 s20, -1, 0
@@ -1945,12 +1935,10 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
 ; TONGA-NEXT:    s_cselect_b32 s20, s21, s20
 ; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; TONGA-NEXT:    s_subb_u32 s3, s3, s7
-; TONGA-NEXT:    s_sub_i32 s21, s18, s6
-; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_sub_i32 s16, s18, s6
 ; TONGA-NEXT:    s_subb_u32 s3, s3, 0
 ; TONGA-NEXT:    s_cmp_lg_u32 s20, 0
-; TONGA-NEXT:    s_cselect_b32 s16, s21, s18
+; TONGA-NEXT:    s_cselect_b32 s16, s16, s18
 ; TONGA-NEXT:    s_cselect_b32 s3, s3, s19
 ; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; TONGA-NEXT:    s_subb_u32 s5, s13, s5
@@ -2730,7 +2718,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s7
 ; GCN-NEXT:    s_sub_u32 s9, 0, s6
-; GCN-NEXT:    s_subb_u32 s16, 0, s7
+; GCN-NEXT:    s_subb_u32 s14, 0, s7
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2739,75 +2727,71 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s17, v1
-; GCN-NEXT:    v_readfirstlane_b32 s14, v0
-; GCN-NEXT:    s_mul_i32 s15, s9, s17
-; GCN-NEXT:    s_mul_hi_u32 s19, s9, s14
-; GCN-NEXT:    s_mul_i32 s18, s16, s14
-; GCN-NEXT:    s_add_i32 s15, s19, s15
-; GCN-NEXT:    s_add_i32 s15, s15, s18
-; GCN-NEXT:    s_mul_i32 s20, s9, s14
-; GCN-NEXT:    s_mul_i32 s19, s14, s15
-; GCN-NEXT:    s_mul_hi_u32 s21, s14, s20
-; GCN-NEXT:    s_mul_hi_u32 s18, s14, s15
+; GCN-NEXT:    v_readfirstlane_b32 s15, v1
+; GCN-NEXT:    v_readfirstlane_b32 s16, v0
+; GCN-NEXT:    s_mul_i32 s17, s9, s15
+; GCN-NEXT:    s_mul_hi_u32 s19, s9, s16
+; GCN-NEXT:    s_mul_i32 s18, s14, s16
+; GCN-NEXT:    s_add_i32 s17, s19, s17
+; GCN-NEXT:    s_add_i32 s17, s17, s18
+; GCN-NEXT:    s_mul_i32 s20, s9, s16
+; GCN-NEXT:    s_mul_i32 s19, s16, s17
+; GCN-NEXT:    s_mul_hi_u32 s21, s16, s20
+; GCN-NEXT:    s_mul_hi_u32 s18, s16, s17
 ; GCN-NEXT:    s_add_u32 s19, s21, s19
 ; GCN-NEXT:    s_addc_u32 s18, 0, s18
-; GCN-NEXT:    s_mul_hi_u32 s22, s17, s20
-; GCN-NEXT:    s_mul_i32 s20, s17, s20
+; GCN-NEXT:    s_mul_hi_u32 s22, s15, s20
+; GCN-NEXT:    s_mul_i32 s20, s15, s20
 ; GCN-NEXT:    s_add_u32 s19, s19, s20
-; GCN-NEXT:    s_mul_hi_u32 s21, s17, s15
+; GCN-NEXT:    s_mul_hi_u32 s21, s15, s17
 ; GCN-NEXT:    s_addc_u32 s18, s18, s22
 ; GCN-NEXT:    s_addc_u32 s19, s21, 0
-; GCN-NEXT:    s_mul_i32 s15, s17, s15
-; GCN-NEXT:    s_add_u32 s15, s18, s15
+; GCN-NEXT:    s_mul_i32 s17, s15, s17
+; GCN-NEXT:    s_add_u32 s17, s18, s17
 ; GCN-NEXT:    s_addc_u32 s18, 0, s19
-; GCN-NEXT:    s_add_i32 s19, s14, s15
-; GCN-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
-; GCN-NEXT:    s_addc_u32 s17, s17, s18
-; GCN-NEXT:    s_mul_i32 s14, s9, s17
-; GCN-NEXT:    s_mul_hi_u32 s15, s9, s19
-; GCN-NEXT:    s_add_i32 s14, s15, s14
-; GCN-NEXT:    s_mul_i32 s16, s16, s19
-; GCN-NEXT:    s_add_i32 s14, s14, s16
-; GCN-NEXT:    s_mul_i32 s9, s9, s19
-; GCN-NEXT:    s_mul_hi_u32 s16, s17, s9
-; GCN-NEXT:    s_mul_i32 s18, s17, s9
-; GCN-NEXT:    s_mul_i32 s21, s19, s14
-; GCN-NEXT:    s_mul_hi_u32 s9, s19, s9
-; GCN-NEXT:    s_mul_hi_u32 s20, s19, s14
+; GCN-NEXT:    s_add_i32 s16, s16, s17
+; GCN-NEXT:    s_addc_u32 s15, s15, s18
+; GCN-NEXT:    s_mul_i32 s17, s9, s15
+; GCN-NEXT:    s_mul_hi_u32 s18, s9, s16
+; GCN-NEXT:    s_add_i32 s17, s18, s17
+; GCN-NEXT:    s_mul_i32 s14, s14, s16
+; GCN-NEXT:    s_add_i32 s17, s17, s14
+; GCN-NEXT:    s_mul_i32 s9, s9, s16
+; GCN-NEXT:    s_mul_hi_u32 s18, s15, s9
+; GCN-NEXT:    s_mul_i32 s19, s15, s9
+; GCN-NEXT:    s_mul_i32 s21, s16, s17
+; GCN-NEXT:    s_mul_hi_u32 s9, s16, s9
+; GCN-NEXT:    s_mul_hi_u32 s20, s16, s17
 ; GCN-NEXT:    s_add_u32 s9, s9, s21
 ; GCN-NEXT:    s_addc_u32 s20, 0, s20
-; GCN-NEXT:    s_add_u32 s9, s9, s18
-; GCN-NEXT:    s_mul_hi_u32 s15, s17, s14
-; GCN-NEXT:    s_addc_u32 s9, s20, s16
-; GCN-NEXT:    s_addc_u32 s15, s15, 0
-; GCN-NEXT:    s_mul_i32 s14, s17, s14
-; GCN-NEXT:    s_add_u32 s9, s9, s14
-; GCN-NEXT:    s_addc_u32 s16, 0, s15
-; GCN-NEXT:    s_add_i32 s19, s19, s9
-; GCN-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[14:15], 0
-; GCN-NEXT:    s_addc_u32 s9, s17, s16
+; GCN-NEXT:    s_add_u32 s9, s9, s19
+; GCN-NEXT:    s_mul_hi_u32 s14, s15, s17
+; GCN-NEXT:    s_addc_u32 s9, s20, s18
+; GCN-NEXT:    s_addc_u32 s14, s14, 0
+; GCN-NEXT:    s_mul_i32 s17, s15, s17
+; GCN-NEXT:    s_add_u32 s9, s9, s17
+; GCN-NEXT:    s_addc_u32 s14, 0, s14
+; GCN-NEXT:    s_add_i32 s9, s16, s9
+; GCN-NEXT:    s_addc_u32 s18, s15, s14
 ; GCN-NEXT:    s_ashr_i32 s14, s11, 31
 ; GCN-NEXT:    s_add_u32 s16, s10, s14
 ; GCN-NEXT:    s_mov_b32 s15, s14
 ; GCN-NEXT:    s_addc_u32 s17, s11, s14
 ; GCN-NEXT:    s_xor_b64 s[16:17], s[16:17], s[14:15]
-; GCN-NEXT:    s_mul_i32 s18, s16, s9
-; GCN-NEXT:    s_mul_hi_u32 s20, s16, s19
-; GCN-NEXT:    s_mul_hi_u32 s11, s16, s9
-; GCN-NEXT:    s_add_u32 s18, s20, s18
+; GCN-NEXT:    s_mul_i32 s19, s16, s18
+; GCN-NEXT:    s_mul_hi_u32 s20, s16, s9
+; GCN-NEXT:    s_mul_hi_u32 s11, s16, s18
+; GCN-NEXT:    s_add_u32 s19, s20, s19
 ; GCN-NEXT:    s_addc_u32 s11, 0, s11
-; GCN-NEXT:    s_mul_hi_u32 s21, s17, s19
-; GCN-NEXT:    s_mul_i32 s19, s17, s19
-; GCN-NEXT:    s_add_u32 s18, s18, s19
-; GCN-NEXT:    s_mul_hi_u32 s20, s17, s9
-; GCN-NEXT:    s_addc_u32 s11, s11, s21
-; GCN-NEXT:    s_addc_u32 s18, s20, 0
+; GCN-NEXT:    s_mul_hi_u32 s21, s17, s9
 ; GCN-NEXT:    s_mul_i32 s9, s17, s9
-; GCN-NEXT:    s_add_u32 s9, s11, s9
-; GCN-NEXT:    s_addc_u32 s11, 0, s18
+; GCN-NEXT:    s_add_u32 s9, s19, s9
+; GCN-NEXT:    s_mul_hi_u32 s20, s17, s18
+; GCN-NEXT:    s_addc_u32 s9, s11, s21
+; GCN-NEXT:    s_addc_u32 s11, s20, 0
+; GCN-NEXT:    s_mul_i32 s18, s17, s18
+; GCN-NEXT:    s_add_u32 s9, s9, s18
+; GCN-NEXT:    s_addc_u32 s11, 0, s11
 ; GCN-NEXT:    s_mul_i32 s11, s6, s11
 ; GCN-NEXT:    s_mul_hi_u32 s18, s6, s9
 ; GCN-NEXT:    s_add_i32 s11, s18, s11
@@ -2817,11 +2801,9 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_mul_i32 s9, s6, s9
 ; GCN-NEXT:    s_sub_i32 s9, s16, s9
 ; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
 ; GCN-NEXT:    s_subb_u32 s16, s20, s7
 ; GCN-NEXT:    s_sub_i32 s22, s9, s6
 ; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
 ; GCN-NEXT:    s_subb_u32 s23, s16, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s23, s7
 ; GCN-NEXT:    s_cselect_b32 s24, -1, 0
@@ -2831,12 +2813,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_cselect_b32 s24, s25, s24
 ; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
 ; GCN-NEXT:    s_subb_u32 s16, s16, s7
-; GCN-NEXT:    s_sub_i32 s25, s22, s6
-; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
+; GCN-NEXT:    s_sub_i32 s20, s22, s6
 ; GCN-NEXT:    s_subb_u32 s16, s16, 0
 ; GCN-NEXT:    s_cmp_lg_u32 s24, 0
-; GCN-NEXT:    s_cselect_b32 s20, s25, s22
+; GCN-NEXT:    s_cselect_b32 s20, s20, s22
 ; GCN-NEXT:    s_cselect_b32 s16, s16, s23
 ; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
 ; GCN-NEXT:    s_subb_u32 s11, s17, s11
@@ -2887,7 +2867,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s11
 ; GCN-NEXT:    s_sub_u32 s3, 0, s10
-; GCN-NEXT:    s_subb_u32 s14, 0, s11
+; GCN-NEXT:    s_subb_u32 s12, 0, s11
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2896,75 +2876,71 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s15, v1
-; GCN-NEXT:    v_readfirstlane_b32 s12, v0
-; GCN-NEXT:    s_mul_i32 s13, s3, s15
-; GCN-NEXT:    s_mul_hi_u32 s17, s3, s12
-; GCN-NEXT:    s_mul_i32 s16, s14, s12
-; GCN-NEXT:    s_add_i32 s13, s17, s13
-; GCN-NEXT:    s_add_i32 s13, s13, s16
-; GCN-NEXT:    s_mul_i32 s18, s3, s12
-; GCN-NEXT:    s_mul_i32 s17, s12, s13
-; GCN-NEXT:    s_mul_hi_u32 s19, s12, s18
-; GCN-NEXT:    s_mul_hi_u32 s16, s12, s13
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_mul_i32 s15, s3, s13
+; GCN-NEXT:    s_mul_hi_u32 s17, s3, s14
+; GCN-NEXT:    s_mul_i32 s16, s12, s14
+; GCN-NEXT:    s_add_i32 s15, s17, s15
+; GCN-NEXT:    s_add_i32 s15, s15, s16
+; GCN-NEXT:    s_mul_i32 s18, s3, s14
+; GCN-NEXT:    s_mul_i32 s17, s14, s15
+; GCN-NEXT:    s_mul_hi_u32 s19, s14, s18
+; GCN-NEXT:    s_mul_hi_u32 s16, s14, s15
 ; GCN-NEXT:    s_add_u32 s17, s19, s17
 ; GCN-NEXT:    s_addc_u32 s16, 0, s16
-; GCN-NEXT:    s_mul_hi_u32 s20, s15, s18
-; GCN-NEXT:    s_mul_i32 s18, s15, s18
+; GCN-NEXT:    s_mul_hi_u32 s20, s13, s18
+; GCN-NEXT:    s_mul_i32 s18, s13, s18
 ; GCN-NEXT:    s_add_u32 s17, s17, s18
-; GCN-NEXT:    s_mul_hi_u32 s19, s15, s13
+; GCN-NEXT:    s_mul_hi_u32 s19, s13, s15
 ; GCN-NEXT:    s_addc_u32 s16, s16, s20
 ; GCN-NEXT:    s_addc_u32 s17, s19, 0
-; GCN-NEXT:    s_mul_i32 s13, s15, s13
-; GCN-NEXT:    s_add_u32 s13, s16, s13
+; GCN-NEXT:    s_mul_i32 s15, s13, s15
+; GCN-NEXT:    s_add_u32 s15, s16, s15
 ; GCN-NEXT:    s_addc_u32 s16, 0, s17
-; GCN-NEXT:    s_add_i32 s17, s12, s13
-; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT:    s_addc_u32 s15, s15, s16
-; GCN-NEXT:    s_mul_i32 s12, s3, s15
-; GCN-NEXT:    s_mul_hi_u32 s13, s3, s17
-; GCN-NEXT:    s_add_i32 s12, s13, s12
-; GCN-NEXT:    s_mul_i32 s14, s14, s17
-; GCN-NEXT:    s_add_i32 s12, s12, s14
-; GCN-NEXT:    s_mul_i32 s3, s3, s17
-; GCN-NEXT:    s_mul_hi_u32 s14, s15, s3
-; GCN-NEXT:    s_mul_i32 s16, s15, s3
-; GCN-NEXT:    s_mul_i32 s19, s17, s12
-; GCN-NEXT:    s_mul_hi_u32 s3, s17, s3
-; GCN-NEXT:    s_mul_hi_u32 s18, s17, s12
+; GCN-NEXT:    s_add_i32 s14, s14, s15
+; GCN-NEXT:    s_addc_u32 s13, s13, s16
+; GCN-NEXT:    s_mul_i32 s15, s3, s13
+; GCN-NEXT:    s_mul_hi_u32 s16, s3, s14
+; GCN-NEXT:    s_add_i32 s15, s16, s15
+; GCN-NEXT:    s_mul_i32 s12, s12, s14
+; GCN-NEXT:    s_add_i32 s15, s15, s12
+; GCN-NEXT:    s_mul_i32 s3, s3, s14
+; GCN-NEXT:    s_mul_hi_u32 s16, s13, s3
+; GCN-NEXT:    s_mul_i32 s17, s13, s3
+; GCN-NEXT:    s_mul_i32 s19, s14, s15
+; GCN-NEXT:    s_mul_hi_u32 s3, s14, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s14, s15
 ; GCN-NEXT:    s_add_u32 s3, s3, s19
 ; GCN-NEXT:    s_addc_u32 s18, 0, s18
-; GCN-NEXT:    s_add_u32 s3, s3, s16
-; GCN-NEXT:    s_mul_hi_u32 s13, s15, s12
-; GCN-NEXT:    s_addc_u32 s3, s18, s14
-; GCN-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-NEXT:    s_mul_i32 s12, s15, s12
-; GCN-NEXT:    s_add_u32 s3, s3, s12
-; GCN-NEXT:    s_addc_u32 s14, 0, s13
-; GCN-NEXT:    s_add_i32 s17, s17, s3
-; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT:    s_addc_u32 s3, s15, s14
+; GCN-NEXT:    s_add_u32 s3, s3, s17
+; GCN-NEXT:    s_mul_hi_u32 s12, s13, s15
+; GCN-NEXT:    s_addc_u32 s3, s18, s16
+; GCN-NEXT:    s_addc_u32 s12, s12, 0
+; GCN-NEXT:    s_mul_i32 s15, s13, s15
+; GCN-NEXT:    s_add_u32 s3, s3, s15
+; GCN-NEXT:    s_addc_u32 s12, 0, s12
+; GCN-NEXT:    s_add_i32 s3, s14, s3
+; GCN-NEXT:    s_addc_u32 s16, s13, s12
 ; GCN-NEXT:    s_ashr_i32 s12, s5, 31
 ; GCN-NEXT:    s_add_u32 s14, s4, s12
 ; GCN-NEXT:    s_mov_b32 s13, s12
 ; GCN-NEXT:    s_addc_u32 s15, s5, s12
 ; GCN-NEXT:    s_xor_b64 s[14:15], s[14:15], s[12:13]
-; GCN-NEXT:    s_mul_i32 s16, s14, s3
-; GCN-NEXT:    s_mul_hi_u32 s18, s14, s17
-; GCN-NEXT:    s_mul_hi_u32 s5, s14, s3
-; GCN-NEXT:    s_add_u32 s16, s18, s16
+; GCN-NEXT:    s_mul_i32 s17, s14, s16
+; GCN-NEXT:    s_mul_hi_u32 s18, s14, s3
+; GCN-NEXT:    s_mul_hi_u32 s5, s14, s16
+; GCN-NEXT:    s_add_u32 s17, s18, s17
 ; GCN-NEXT:    s_addc_u32 s5, 0, s5
-; GCN-NEXT:    s_mul_hi_u32 s19, s15, s17
-; GCN-NEXT:    s_mul_i32 s17, s15, s17
-; GCN-NEXT:    s_add_u32 s16, s16, s17
-; GCN-NEXT:    s_mul_hi_u32 s18, s15, s3
-; GCN-NEXT:    s_addc_u32 s5, s5, s19
-; GCN-NEXT:    s_addc_u32 s16, s18, 0
+; GCN-NEXT:    s_mul_hi_u32 s19, s15, s3
 ; GCN-NEXT:    s_mul_i32 s3, s15, s3
-; GCN-NEXT:    s_add_u32 s3, s5, s3
-; GCN-NEXT:    s_addc_u32 s5, 0, s16
+; GCN-NEXT:    s_add_u32 s3, s17, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s15, s16
+; GCN-NEXT:    s_addc_u32 s3, s5, s19
+; GCN-NEXT:    s_addc_u32 s5, s18, 0
+; GCN-NEXT:    s_mul_i32 s16, s15, s16
+; GCN-NEXT:    s_add_u32 s3, s3, s16
+; GCN-NEXT:    s_addc_u32 s5, 0, s5
 ; GCN-NEXT:    s_mul_i32 s5, s10, s5
 ; GCN-NEXT:    s_mul_hi_u32 s16, s10, s3
 ; GCN-NEXT:    s_add_i32 s5, s16, s5
@@ -2974,11 +2950,9 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_mul_i32 s3, s10, s3
 ; GCN-NEXT:    s_sub_i32 s3, s14, s3
 ; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; GCN-NEXT:    s_subb_u32 s14, s18, s11
 ; GCN-NEXT:    s_sub_i32 s20, s3, s10
 ; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
 ; GCN-NEXT:    s_subb_u32 s21, s14, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s21, s11
 ; GCN-NEXT:    s_cselect_b32 s22, -1, 0
@@ -2988,12 +2962,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_cselect_b32 s22, s23, s22
 ; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
 ; GCN-NEXT:    s_subb_u32 s14, s14, s11
-; GCN-NEXT:    s_sub_i32 s23, s20, s10
-; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_sub_i32 s18, s20, s10
 ; GCN-NEXT:    s_subb_u32 s14, s14, 0
 ; GCN-NEXT:    s_cmp_lg_u32 s22, 0
-; GCN-NEXT:    s_cselect_b32 s18, s23, s20
+; GCN-NEXT:    s_cselect_b32 s18, s18, s20
 ; GCN-NEXT:    s_cselect_b32 s14, s14, s21
 ; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; GCN-NEXT:    s_subb_u32 s5, s15, s5
@@ -3463,11 +3435,9 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_readfirstlane_b32 s14, v0
 ; TONGA-NEXT:    s_sub_i32 s12, s12, s14
 ; TONGA-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; TONGA-NEXT:    s_subb_u32 s1, s1, s7
 ; TONGA-NEXT:    s_sub_i32 s18, s12, s6
 ; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; TONGA-NEXT:    s_subb_u32 s19, s1, 0
 ; TONGA-NEXT:    s_cmp_ge_u32 s19, s7
 ; TONGA-NEXT:    s_cselect_b32 s20, -1, 0
@@ -3477,12 +3447,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    s_cselect_b32 s20, s21, s20
 ; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; TONGA-NEXT:    s_subb_u32 s1, s1, s7
-; TONGA-NEXT:    s_sub_i32 s21, s18, s6
-; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_sub_i32 s16, s18, s6
 ; TONGA-NEXT:    s_subb_u32 s1, s1, 0
 ; TONGA-NEXT:    s_cmp_lg_u32 s20, 0
-; TONGA-NEXT:    s_cselect_b32 s16, s21, s18
+; TONGA-NEXT:    s_cselect_b32 s16, s16, s18
 ; TONGA-NEXT:    s_cselect_b32 s1, s1, s19
 ; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; TONGA-NEXT:    s_subb_u32 s3, s13, s3
@@ -4934,7 +4902,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s6
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s7
 ; GCN-NEXT:    s_sub_u32 s17, 0, s6
-; GCN-NEXT:    s_subb_u32 s24, 0, s7
+; GCN-NEXT:    s_subb_u32 s22, 0, s7
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -4943,75 +4911,71 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s25, v1
-; GCN-NEXT:    v_readfirstlane_b32 s22, v0
-; GCN-NEXT:    s_mul_i32 s23, s17, s25
-; GCN-NEXT:    s_mul_hi_u32 s27, s17, s22
-; GCN-NEXT:    s_mul_i32 s26, s24, s22
-; GCN-NEXT:    s_add_i32 s23, s27, s23
-; GCN-NEXT:    s_add_i32 s23, s23, s26
-; GCN-NEXT:    s_mul_i32 s28, s17, s22
-; GCN-NEXT:    s_mul_i32 s27, s22, s23
-; GCN-NEXT:    s_mul_hi_u32 s29, s22, s28
-; GCN-NEXT:    s_mul_hi_u32 s26, s22, s23
+; GCN-NEXT:    v_readfirstlane_b32 s23, v1
+; GCN-NEXT:    v_readfirstlane_b32 s24, v0
+; GCN-NEXT:    s_mul_i32 s25, s17, s23
+; GCN-NEXT:    s_mul_hi_u32 s27, s17, s24
+; GCN-NEXT:    s_mul_i32 s26, s22, s24
+; GCN-NEXT:    s_add_i32 s25, s27, s25
+; GCN-NEXT:    s_add_i32 s25, s25, s26
+; GCN-NEXT:    s_mul_i32 s28, s17, s24
+; GCN-NEXT:    s_mul_i32 s27, s24, s25
+; GCN-NEXT:    s_mul_hi_u32 s29, s24, s28
+; GCN-NEXT:    s_mul_hi_u32 s26, s24, s25
 ; GCN-NEXT:    s_add_u32 s27, s29, s27
 ; GCN-NEXT:    s_addc_u32 s26, 0, s26
-; GCN-NEXT:    s_mul_hi_u32 s30, s25, s28
-; GCN-NEXT:    s_mul_i32 s28, s25, s28
+; GCN-NEXT:    s_mul_hi_u32 s30, s23, s28
+; GCN-NEXT:    s_mul_i32 s28, s23, s28
 ; GCN-NEXT:    s_add_u32 s27, s27, s28
-; GCN-NEXT:    s_mul_hi_u32 s29, s25, s23
+; GCN-NEXT:    s_mul_hi_u32 s29, s23, s25
 ; GCN-NEXT:    s_addc_u32 s26, s26, s30
 ; GCN-NEXT:    s_addc_u32 s27, s29, 0
-; GCN-NEXT:    s_mul_i32 s23, s25, s23
-; GCN-NEXT:    s_add_u32 s23, s26, s23
+; GCN-NEXT:    s_mul_i32 s25, s23, s25
+; GCN-NEXT:    s_add_u32 s25, s26, s25
 ; GCN-NEXT:    s_addc_u32 s26, 0, s27
-; GCN-NEXT:    s_add_i32 s27, s22, s23
-; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
-; GCN-NEXT:    s_addc_u32 s25, s25, s26
-; GCN-NEXT:    s_mul_i32 s22, s17, s25
-; GCN-NEXT:    s_mul_hi_u32 s23, s17, s27
-; GCN-NEXT:    s_add_i32 s22, s23, s22
-; GCN-NEXT:    s_mul_i32 s24, s24, s27
-; GCN-NEXT:    s_add_i32 s22, s22, s24
-; GCN-NEXT:    s_mul_i32 s17, s17, s27
-; GCN-NEXT:    s_mul_hi_u32 s24, s25, s17
-; GCN-NEXT:    s_mul_i32 s26, s25, s17
-; GCN-NEXT:    s_mul_i32 s29, s27, s22
-; GCN-NEXT:    s_mul_hi_u32 s17, s27, s17
-; GCN-NEXT:    s_mul_hi_u32 s28, s27, s22
+; GCN-NEXT:    s_add_i32 s24, s24, s25
+; GCN-NEXT:    s_addc_u32 s23, s23, s26
+; GCN-NEXT:    s_mul_i32 s25, s17, s23
+; GCN-NEXT:    s_mul_hi_u32 s26, s17, s24
+; GCN-NEXT:    s_add_i32 s25, s26, s25
+; GCN-NEXT:    s_mul_i32 s22, s22, s24
+; GCN-NEXT:    s_add_i32 s25, s25, s22
+; GCN-NEXT:    s_mul_i32 s17, s17, s24
+; GCN-NEXT:    s_mul_hi_u32 s26, s23, s17
+; GCN-NEXT:    s_mul_i32 s27, s23, s17
+; GCN-NEXT:    s_mul_i32 s29, s24, s25
+; GCN-NEXT:    s_mul_hi_u32 s17, s24, s17
+; GCN-NEXT:    s_mul_hi_u32 s28, s24, s25
 ; GCN-NEXT:    s_add_u32 s17, s17, s29
 ; GCN-NEXT:    s_addc_u32 s28, 0, s28
-; GCN-NEXT:    s_add_u32 s17, s17, s26
-; GCN-NEXT:    s_mul_hi_u32 s23, s25, s22
-; GCN-NEXT:    s_addc_u32 s17, s28, s24
-; GCN-NEXT:    s_addc_u32 s23, s23, 0
-; GCN-NEXT:    s_mul_i32 s22, s25, s22
-; GCN-NEXT:    s_add_u32 s17, s17, s22
-; GCN-NEXT:    s_addc_u32 s24, 0, s23
-; GCN-NEXT:    s_add_i32 s27, s27, s17
-; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
-; GCN-NEXT:    s_addc_u32 s17, s25, s24
+; GCN-NEXT:    s_add_u32 s17, s17, s27
+; GCN-NEXT:    s_mul_hi_u32 s22, s23, s25
+; GCN-NEXT:    s_addc_u32 s17, s28, s26
+; GCN-NEXT:    s_addc_u32 s22, s22, 0
+; GCN-NEXT:    s_mul_i32 s25, s23, s25
+; GCN-NEXT:    s_add_u32 s17, s17, s25
+; GCN-NEXT:    s_addc_u32 s22, 0, s22
+; GCN-NEXT:    s_add_i32 s17, s24, s17
+; GCN-NEXT:    s_addc_u32 s26, s23, s22
 ; GCN-NEXT:    s_ashr_i32 s22, s19, 31
 ; GCN-NEXT:    s_add_u32 s24, s18, s22
 ; GCN-NEXT:    s_mov_b32 s23, s22
 ; GCN-NEXT:    s_addc_u32 s25, s19, s22
 ; GCN-NEXT:    s_xor_b64 s[24:25], s[24:25], s[22:23]
-; GCN-NEXT:    s_mul_i32 s26, s24, s17
-; GCN-NEXT:    s_mul_hi_u32 s28, s24, s27
-; GCN-NEXT:    s_mul_hi_u32 s19, s24, s17
-; GCN-NEXT:    s_add_u32 s26, s28, s26
+; GCN-NEXT:    s_mul_i32 s27, s24, s26
+; GCN-NEXT:    s_mul_hi_u32 s28, s24, s17
+; GCN-NEXT:    s_mul_hi_u32 s19, s24, s26
+; GCN-NEXT:    s_add_u32 s27, s28, s27
 ; GCN-NEXT:    s_addc_u32 s19, 0, s19
-; GCN-NEXT:    s_mul_hi_u32 s29, s25, s27
-; GCN-NEXT:    s_mul_i32 s27, s25, s27
-; GCN-NEXT:    s_add_u32 s26, s26, s27
-; GCN-NEXT:    s_mul_hi_u32 s28, s25, s17
-; GCN-NEXT:    s_addc_u32 s19, s19, s29
-; GCN-NEXT:    s_addc_u32 s26, s28, 0
+; GCN-NEXT:    s_mul_hi_u32 s29, s25, s17
 ; GCN-NEXT:    s_mul_i32 s17, s25, s17
-; GCN-NEXT:    s_add_u32 s17, s19, s17
-; GCN-NEXT:    s_addc_u32 s19, 0, s26
+; GCN-NEXT:    s_add_u32 s17, s27, s17
+; GCN-NEXT:    s_mul_hi_u32 s28, s25, s26
+; GCN-NEXT:    s_addc_u32 s17, s19, s29
+; GCN-NEXT:    s_addc_u32 s19, s28, 0
+; GCN-NEXT:    s_mul_i32 s26, s25, s26
+; GCN-NEXT:    s_add_u32 s17, s17, s26
+; GCN-NEXT:    s_addc_u32 s19, 0, s19
 ; GCN-NEXT:    s_mul_i32 s19, s6, s19
 ; GCN-NEXT:    s_mul_hi_u32 s26, s6, s17
 ; GCN-NEXT:    s_add_i32 s19, s26, s19
@@ -5021,11 +4985,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_mul_i32 s17, s6, s17
 ; GCN-NEXT:    s_sub_i32 s17, s24, s17
 ; GCN-NEXT:    s_cselect_b64 s[26:27], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
 ; GCN-NEXT:    s_subb_u32 s24, s28, s7
 ; GCN-NEXT:    s_sub_i32 s30, s17, s6
 ; GCN-NEXT:    s_cselect_b64 s[28:29], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[28:29], 0
 ; GCN-NEXT:    s_subb_u32 s31, s24, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s31, s7
 ; GCN-NEXT:    s_cselect_b32 s33, -1, 0
@@ -5035,12 +4997,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_cselect_b32 s33, s34, s33
 ; GCN-NEXT:    s_cmp_lg_u64 s[28:29], 0
 ; GCN-NEXT:    s_subb_u32 s24, s24, s7
-; GCN-NEXT:    s_sub_i32 s34, s30, s6
-; GCN-NEXT:    s_cselect_b64 s[28:29], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[28:29], 0
+; GCN-NEXT:    s_sub_i32 s28, s30, s6
 ; GCN-NEXT:    s_subb_u32 s24, s24, 0
 ; GCN-NEXT:    s_cmp_lg_u32 s33, 0
-; GCN-NEXT:    s_cselect_b32 s28, s34, s30
+; GCN-NEXT:    s_cselect_b32 s28, s28, s30
 ; GCN-NEXT:    s_cselect_b32 s24, s24, s31
 ; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
 ; GCN-NEXT:    s_subb_u32 s19, s25, s19
@@ -5091,7 +5051,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s18
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s19
 ; GCN-NEXT:    s_sub_u32 s13, 0, s18
-; GCN-NEXT:    s_subb_u32 s22, 0, s19
+; GCN-NEXT:    s_subb_u32 s20, 0, s19
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -5100,75 +5060,71 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s23, v1
-; GCN-NEXT:    v_readfirstlane_b32 s20, v0
-; GCN-NEXT:    s_mul_i32 s21, s13, s23
-; GCN-NEXT:    s_mul_hi_u32 s25, s13, s20
-; GCN-NEXT:    s_mul_i32 s24, s22, s20
-; GCN-NEXT:    s_add_i32 s21, s25, s21
-; GCN-NEXT:    s_add_i32 s21, s21, s24
-; GCN-NEXT:    s_mul_i32 s26, s13, s20
-; GCN-NEXT:    s_mul_i32 s25, s20, s21
-; GCN-NEXT:    s_mul_hi_u32 s27, s20, s26
-; GCN-NEXT:    s_mul_hi_u32 s24, s20, s21
+; GCN-NEXT:    v_readfirstlane_b32 s21, v1
+; GCN-NEXT:    v_readfirstlane_b32 s22, v0
+; GCN-NEXT:    s_mul_i32 s23, s13, s21
+; GCN-NEXT:    s_mul_hi_u32 s25, s13, s22
+; GCN-NEXT:    s_mul_i32 s24, s20, s22
+; GCN-NEXT:    s_add_i32 s23, s25, s23
+; GCN-NEXT:    s_add_i32 s23, s23, s24
+; GCN-NEXT:    s_mul_i32 s26, s13, s22
+; GCN-NEXT:    s_mul_i32 s25, s22, s23
+; GCN-NEXT:    s_mul_hi_u32 s27, s22, s26
+; GCN-NEXT:    s_mul_hi_u32 s24, s22, s23
 ; GCN-NEXT:    s_add_u32 s25, s27, s25
 ; GCN-NEXT:    s_addc_u32 s24, 0, s24
-; GCN-NEXT:    s_mul_hi_u32 s28, s23, s26
-; GCN-NEXT:    s_mul_i32 s26, s23, s26
+; GCN-NEXT:    s_mul_hi_u32 s28, s21, s26
+; GCN-NEXT:    s_mul_i32 s26, s21, s26
 ; GCN-NEXT:    s_add_u32 s25, s25, s26
-; GCN-NEXT:    s_mul_hi_u32 s27, s23, s21
+; GCN-NEXT:    s_mul_hi_u32 s27, s21, s23
 ; GCN-NEXT:    s_addc_u32 s24, s24, s28
 ; GCN-NEXT:    s_addc_u32 s25, s27, 0
-; GCN-NEXT:    s_mul_i32 s21, s23, s21
-; GCN-NEXT:    s_add_u32 s21, s24, s21
+; GCN-NEXT:    s_mul_i32 s23, s21, s23
+; GCN-NEXT:    s_add_u32 s23, s24, s23
 ; GCN-NEXT:    s_addc_u32 s24, 0, s25
-; GCN-NEXT:    s_add_i32 s25, s20, s21
-; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
-; GCN-NEXT:    s_addc_u32 s23, s23, s24
-; GCN-NEXT:    s_mul_i32 s20, s13, s23
-; GCN-NEXT:    s_mul_hi_u32 s21, s13, s25
-; GCN-NEXT:    s_add_i32 s20, s21, s20
-; GCN-NEXT:    s_mul_i32 s22, s22, s25
-; GCN-NEXT:    s_add_i32 s20, s20, s22
-; GCN-NEXT:    s_mul_i32 s13, s13, s25
-; GCN-NEXT:    s_mul_hi_u32 s22, s23, s13
-; GCN-NEXT:    s_mul_i32 s24, s23, s13
-; GCN-NEXT:    s_mul_i32 s27, s25, s20
-; GCN-NEXT:    s_mul_hi_u32 s13, s25, s13
-; GCN-NEXT:    s_mul_hi_u32 s26, s25, s20
+; GCN-NEXT:    s_add_i32 s22, s22, s23
+; GCN-NEXT:    s_addc_u32 s21, s21, s24
+; GCN-NEXT:    s_mul_i32 s23, s13, s21
+; GCN-NEXT:    s_mul_hi_u32 s24, s13, s22
+; GCN-NEXT:    s_add_i32 s23, s24, s23
+; GCN-NEXT:    s_mul_i32 s20, s20, s22
+; GCN-NEXT:    s_add_i32 s23, s23, s20
+; GCN-NEXT:    s_mul_i32 s13, s13, s22
+; GCN-NEXT:    s_mul_hi_u32 s24, s21, s13
+; GCN-NEXT:    s_mul_i32 s25, s21, s13
+; GCN-NEXT:    s_mul_i32 s27, s22, s23
+; GCN-NEXT:    s_mul_hi_u32 s13, s22, s13
+; GCN-NEXT:    s_mul_hi_u32 s26, s22, s23
 ; GCN-NEXT:    s_add_u32 s13, s13, s27
 ; GCN-NEXT:    s_addc_u32 s26, 0, s26
-; GCN-NEXT:    s_add_u32 s13, s13, s24
-; GCN-NEXT:    s_mul_hi_u32 s21, s23, s20
-; GCN-NEXT:    s_addc_u32 s13, s26, s22
-; GCN-NEXT:    s_addc_u32 s21, s21, 0
-; GCN-NEXT:    s_mul_i32 s20, s23, s20
-; GCN-NEXT:    s_add_u32 s13, s13, s20
-; GCN-NEXT:    s_addc_u32 s22, 0, s21
-; GCN-NEXT:    s_add_i32 s25, s25, s13
-; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
-; GCN-NEXT:    s_addc_u32 s13, s23, s22
+; GCN-NEXT:    s_add_u32 s13, s13, s25
+; GCN-NEXT:    s_mul_hi_u32 s20, s21, s23
+; GCN-NEXT:    s_addc_u32 s13, s26, s24
+; GCN-NEXT:    s_addc_u32 s20, s20, 0
+; GCN-NEXT:    s_mul_i32 s23, s21, s23
+; GCN-NEXT:    s_add_u32 s13, s13, s23
+; GCN-NEXT:    s_addc_u32 s20, 0, s20
+; GCN-NEXT:    s_add_i32 s13, s22, s13
+; GCN-NEXT:    s_addc_u32 s24, s21, s20
 ; GCN-NEXT:    s_ashr_i32 s20, s15, 31
 ; GCN-NEXT:    s_add_u32 s22, s14, s20
 ; GCN-NEXT:    s_mov_b32 s21, s20
 ; GCN-NEXT:    s_addc_u32 s23, s15, s20
 ; GCN-NEXT:    s_xor_b64 s[22:23], s[22:23], s[20:21]
-; GCN-NEXT:    s_mul_i32 s24, s22, s13
-; GCN-NEXT:    s_mul_hi_u32 s26, s22, s25
-; GCN-NEXT:    s_mul_hi_u32 s15, s22, s13
-; GCN-NEXT:    s_add_u32 s24, s26, s24
+; GCN-NEXT:    s_mul_i32 s25, s22, s24
+; GCN-NEXT:    s_mul_hi_u32 s26, s22, s13
+; GCN-NEXT:    s_mul_hi_u32 s15, s22, s24
+; GCN-NEXT:    s_add_u32 s25, s26, s25
 ; GCN-NEXT:    s_addc_u32 s15, 0, s15
-; GCN-NEXT:    s_mul_hi_u32 s27, s23, s25
-; GCN-NEXT:    s_mul_i32 s25, s23, s25
-; GCN-NEXT:    s_add_u32 s24, s24, s25
-; GCN-NEXT:    s_mul_hi_u32 s26, s23, s13
-; GCN-NEXT:    s_addc_u32 s15, s15, s27
-; GCN-NEXT:    s_addc_u32 s24, s26, 0
+; GCN-NEXT:    s_mul_hi_u32 s27, s23, s13
 ; GCN-NEXT:    s_mul_i32 s13, s23, s13
-; GCN-NEXT:    s_add_u32 s13, s15, s13
-; GCN-NEXT:    s_addc_u32 s15, 0, s24
+; GCN-NEXT:    s_add_u32 s13, s25, s13
+; GCN-NEXT:    s_mul_hi_u32 s26, s23, s24
+; GCN-NEXT:    s_addc_u32 s13, s15, s27
+; GCN-NEXT:    s_addc_u32 s15, s26, 0
+; GCN-NEXT:    s_mul_i32 s24, s23, s24
+; GCN-NEXT:    s_add_u32 s13, s13, s24
+; GCN-NEXT:    s_addc_u32 s15, 0, s15
 ; GCN-NEXT:    s_mul_i32 s15, s18, s15
 ; GCN-NEXT:    s_mul_hi_u32 s24, s18, s13
 ; GCN-NEXT:    s_add_i32 s15, s24, s15
@@ -5178,11 +5134,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_mul_i32 s13, s18, s13
 ; GCN-NEXT:    s_sub_i32 s13, s22, s13
 ; GCN-NEXT:    s_cselect_b64 s[24:25], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[24:25], 0
 ; GCN-NEXT:    s_subb_u32 s22, s26, s19
 ; GCN-NEXT:    s_sub_i32 s28, s13, s18
 ; GCN-NEXT:    s_cselect_b64 s[26:27], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
 ; GCN-NEXT:    s_subb_u32 s29, s22, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s29, s19
 ; GCN-NEXT:    s_cselect_b32 s30, -1, 0
@@ -5192,12 +5146,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_cselect_b32 s30, s31, s30
 ; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
 ; GCN-NEXT:    s_subb_u32 s22, s22, s19
-; GCN-NEXT:    s_sub_i32 s31, s28, s18
-; GCN-NEXT:    s_cselect_b64 s[26:27], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[26:27], 0
+; GCN-NEXT:    s_sub_i32 s26, s28, s18
 ; GCN-NEXT:    s_subb_u32 s22, s22, 0
 ; GCN-NEXT:    s_cmp_lg_u32 s30, 0
-; GCN-NEXT:    s_cselect_b32 s26, s31, s28
+; GCN-NEXT:    s_cselect_b32 s26, s26, s28
 ; GCN-NEXT:    s_cselect_b32 s22, s22, s29
 ; GCN-NEXT:    s_cmp_lg_u64 s[24:25], 0
 ; GCN-NEXT:    s_subb_u32 s15, s23, s15
@@ -5257,7 +5209,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s14
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s15
 ; GCN-NEXT:    s_sub_u32 s9, 0, s14
-; GCN-NEXT:    s_subb_u32 s18, 0, s15
+; GCN-NEXT:    s_subb_u32 s16, 0, s15
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -5266,75 +5218,71 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s19, v1
-; GCN-NEXT:    v_readfirstlane_b32 s16, v0
-; GCN-NEXT:    s_mul_i32 s17, s9, s19
-; GCN-NEXT:    s_mul_hi_u32 s21, s9, s16
-; GCN-NEXT:    s_mul_i32 s20, s18, s16
-; GCN-NEXT:    s_add_i32 s17, s21, s17
-; GCN-NEXT:    s_add_i32 s17, s17, s20
-; GCN-NEXT:    s_mul_i32 s22, s9, s16
-; GCN-NEXT:    s_mul_i32 s21, s16, s17
-; GCN-NEXT:    s_mul_hi_u32 s23, s16, s22
-; GCN-NEXT:    s_mul_hi_u32 s20, s16, s17
+; GCN-NEXT:    v_readfirstlane_b32 s17, v1
+; GCN-NEXT:    v_readfirstlane_b32 s18, v0
+; GCN-NEXT:    s_mul_i32 s19, s9, s17
+; GCN-NEXT:    s_mul_hi_u32 s21, s9, s18
+; GCN-NEXT:    s_mul_i32 s20, s16, s18
+; GCN-NEXT:    s_add_i32 s19, s21, s19
+; GCN-NEXT:    s_add_i32 s19, s19, s20
+; GCN-NEXT:    s_mul_i32 s22, s9, s18
+; GCN-NEXT:    s_mul_i32 s21, s18, s19
+; GCN-NEXT:    s_mul_hi_u32 s23, s18, s22
+; GCN-NEXT:    s_mul_hi_u32 s20, s18, s19
 ; GCN-NEXT:    s_add_u32 s21, s23, s21
 ; GCN-NEXT:    s_addc_u32 s20, 0, s20
-; GCN-NEXT:    s_mul_hi_u32 s24, s19, s22
-; GCN-NEXT:    s_mul_i32 s22, s19, s22
+; GCN-NEXT:    s_mul_hi_u32 s24, s17, s22
+; GCN-NEXT:    s_mul_i32 s22, s17, s22
 ; GCN-NEXT:    s_add_u32 s21, s21, s22
-; GCN-NEXT:    s_mul_hi_u32 s23, s19, s17
+; GCN-NEXT:    s_mul_hi_u32 s23, s17, s19
 ; GCN-NEXT:    s_addc_u32 s20, s20, s24
 ; GCN-NEXT:    s_addc_u32 s21, s23, 0
-; GCN-NEXT:    s_mul_i32 s17, s19, s17
-; GCN-NEXT:    s_add_u32 s17, s20, s17
+; GCN-NEXT:    s_mul_i32 s19, s17, s19
+; GCN-NEXT:    s_add_u32 s19, s20, s19
 ; GCN-NEXT:    s_addc_u32 s20, 0, s21
-; GCN-NEXT:    s_add_i32 s21, s16, s17
-; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
-; GCN-NEXT:    s_addc_u32 s19, s19, s20
-; GCN-NEXT:    s_mul_i32 s16, s9, s19
-; GCN-NEXT:    s_mul_hi_u32 s17, s9, s21
-; GCN-NEXT:    s_add_i32 s16, s17, s16
-; GCN-NEXT:    s_mul_i32 s18, s18, s21
-; GCN-NEXT:    s_add_i32 s16, s16, s18
-; GCN-NEXT:    s_mul_i32 s9, s9, s21
-; GCN-NEXT:    s_mul_hi_u32 s18, s19, s9
-; GCN-NEXT:    s_mul_i32 s20, s19, s9
-; GCN-NEXT:    s_mul_i32 s23, s21, s16
-; GCN-NEXT:    s_mul_hi_u32 s9, s21, s9
-; GCN-NEXT:    s_mul_hi_u32 s22, s21, s16
+; GCN-NEXT:    s_add_i32 s18, s18, s19
+; GCN-NEXT:    s_addc_u32 s17, s17, s20
+; GCN-NEXT:    s_mul_i32 s19, s9, s17
+; GCN-NEXT:    s_mul_hi_u32 s20, s9, s18
+; GCN-NEXT:    s_add_i32 s19, s20, s19
+; GCN-NEXT:    s_mul_i32 s16, s16, s18
+; GCN-NEXT:    s_add_i32 s19, s19, s16
+; GCN-NEXT:    s_mul_i32 s9, s9, s18
+; GCN-NEXT:    s_mul_hi_u32 s20, s17, s9
+; GCN-NEXT:    s_mul_i32 s21, s17, s9
+; GCN-NEXT:    s_mul_i32 s23, s18, s19
+; GCN-NEXT:    s_mul_hi_u32 s9, s18, s9
+; GCN-NEXT:    s_mul_hi_u32 s22, s18, s19
 ; GCN-NEXT:    s_add_u32 s9, s9, s23
 ; GCN-NEXT:    s_addc_u32 s22, 0, s22
-; GCN-NEXT:    s_add_u32 s9, s9, s20
-; GCN-NEXT:    s_mul_hi_u32 s17, s19, s16
-; GCN-NEXT:    s_addc_u32 s9, s22, s18
-; GCN-NEXT:    s_addc_u32 s17, s17, 0
-; GCN-NEXT:    s_mul_i32 s16, s19, s16
-; GCN-NEXT:    s_add_u32 s9, s9, s16
-; GCN-NEXT:    s_addc_u32 s18, 0, s17
-; GCN-NEXT:    s_add_i32 s21, s21, s9
-; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
-; GCN-NEXT:    s_addc_u32 s9, s19, s18
+; GCN-NEXT:    s_add_u32 s9, s9, s21
+; GCN-NEXT:    s_mul_hi_u32 s16, s17, s19
+; GCN-NEXT:    s_addc_u32 s9, s22, s20
+; GCN-NEXT:    s_addc_u32 s16, s16, 0
+; GCN-NEXT:    s_mul_i32 s19, s17, s19
+; GCN-NEXT:    s_add_u32 s9, s9, s19
+; GCN-NEXT:    s_addc_u32 s16, 0, s16
+; GCN-NEXT:    s_add_i32 s9, s18, s9
+; GCN-NEXT:    s_addc_u32 s20, s17, s16
 ; GCN-NEXT:    s_ashr_i32 s16, s11, 31
 ; GCN-NEXT:    s_add_u32 s18, s10, s16
 ; GCN-NEXT:    s_mov_b32 s17, s16
 ; GCN-NEXT:    s_addc_u32 s19, s11, s16
 ; GCN-NEXT:    s_xor_b64 s[18:19], s[18:19], s[16:17]
-; GCN-NEXT:    s_mul_i32 s20, s18, s9
-; GCN-NEXT:    s_mul_hi_u32 s22, s18, s21
-; GCN-NEXT:    s_mul_hi_u32 s11, s18, s9
-; GCN-NEXT:    s_add_u32 s20, s22, s20
+; GCN-NEXT:    s_mul_i32 s21, s18, s20
+; GCN-NEXT:    s_mul_hi_u32 s22, s18, s9
+; GCN-NEXT:    s_mul_hi_u32 s11, s18, s20
+; GCN-NEXT:    s_add_u32 s21, s22, s21
 ; GCN-NEXT:    s_addc_u32 s11, 0, s11
-; GCN-NEXT:    s_mul_hi_u32 s23, s19, s21
-; GCN-NEXT:    s_mul_i32 s21, s19, s21
-; GCN-NEXT:    s_add_u32 s20, s20, s21
-; GCN-NEXT:    s_mul_hi_u32 s22, s19, s9
-; GCN-NEXT:    s_addc_u32 s11, s11, s23
-; GCN-NEXT:    s_addc_u32 s20, s22, 0
+; GCN-NEXT:    s_mul_hi_u32 s23, s19, s9
 ; GCN-NEXT:    s_mul_i32 s9, s19, s9
-; GCN-NEXT:    s_add_u32 s9, s11, s9
-; GCN-NEXT:    s_addc_u32 s11, 0, s20
+; GCN-NEXT:    s_add_u32 s9, s21, s9
+; GCN-NEXT:    s_mul_hi_u32 s22, s19, s20
+; GCN-NEXT:    s_addc_u32 s9, s11, s23
+; GCN-NEXT:    s_addc_u32 s11, s22, 0
+; GCN-NEXT:    s_mul_i32 s20, s19, s20
+; GCN-NEXT:    s_add_u32 s9, s9, s20
+; GCN-NEXT:    s_addc_u32 s11, 0, s11
 ; GCN-NEXT:    s_mul_i32 s11, s14, s11
 ; GCN-NEXT:    s_mul_hi_u32 s20, s14, s9
 ; GCN-NEXT:    s_add_i32 s11, s20, s11
@@ -5344,11 +5292,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_mul_i32 s9, s14, s9
 ; GCN-NEXT:    s_sub_i32 s9, s18, s9
 ; GCN-NEXT:    s_cselect_b64 s[20:21], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
 ; GCN-NEXT:    s_subb_u32 s18, s22, s15
 ; GCN-NEXT:    s_sub_i32 s24, s9, s14
 ; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
 ; GCN-NEXT:    s_subb_u32 s25, s18, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s25, s15
 ; GCN-NEXT:    s_cselect_b32 s26, -1, 0
@@ -5358,12 +5304,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_cselect_b32 s26, s27, s26
 ; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
 ; GCN-NEXT:    s_subb_u32 s18, s18, s15
-; GCN-NEXT:    s_sub_i32 s27, s24, s14
-; GCN-NEXT:    s_cselect_b64 s[22:23], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[22:23], 0
+; GCN-NEXT:    s_sub_i32 s22, s24, s14
 ; GCN-NEXT:    s_subb_u32 s18, s18, 0
 ; GCN-NEXT:    s_cmp_lg_u32 s26, 0
-; GCN-NEXT:    s_cselect_b32 s22, s27, s24
+; GCN-NEXT:    s_cselect_b32 s22, s22, s24
 ; GCN-NEXT:    s_cselect_b32 s18, s18, s25
 ; GCN-NEXT:    s_cmp_lg_u64 s[20:21], 0
 ; GCN-NEXT:    s_subb_u32 s11, s19, s11
@@ -5420,7 +5364,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s10
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s11
 ; GCN-NEXT:    s_sub_u32 s3, 0, s10
-; GCN-NEXT:    s_subb_u32 s14, 0, s11
+; GCN-NEXT:    s_subb_u32 s12, 0, s11
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -5429,75 +5373,71 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GCN-NEXT:    v_readfirstlane_b32 s15, v1
-; GCN-NEXT:    v_readfirstlane_b32 s12, v0
-; GCN-NEXT:    s_mul_i32 s13, s3, s15
-; GCN-NEXT:    s_mul_hi_u32 s17, s3, s12
-; GCN-NEXT:    s_mul_i32 s16, s14, s12
-; GCN-NEXT:    s_add_i32 s13, s17, s13
-; GCN-NEXT:    s_add_i32 s13, s13, s16
-; GCN-NEXT:    s_mul_i32 s18, s3, s12
-; GCN-NEXT:    s_mul_i32 s17, s12, s13
-; GCN-NEXT:    s_mul_hi_u32 s19, s12, s18
-; GCN-NEXT:    s_mul_hi_u32 s16, s12, s13
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_mul_i32 s15, s3, s13
+; GCN-NEXT:    s_mul_hi_u32 s17, s3, s14
+; GCN-NEXT:    s_mul_i32 s16, s12, s14
+; GCN-NEXT:    s_add_i32 s15, s17, s15
+; GCN-NEXT:    s_add_i32 s15, s15, s16
+; GCN-NEXT:    s_mul_i32 s18, s3, s14
+; GCN-NEXT:    s_mul_i32 s17, s14, s15
+; GCN-NEXT:    s_mul_hi_u32 s19, s14, s18
+; GCN-NEXT:    s_mul_hi_u32 s16, s14, s15
 ; GCN-NEXT:    s_add_u32 s17, s19, s17
 ; GCN-NEXT:    s_addc_u32 s16, 0, s16
-; GCN-NEXT:    s_mul_hi_u32 s20, s15, s18
-; GCN-NEXT:    s_mul_i32 s18, s15, s18
+; GCN-NEXT:    s_mul_hi_u32 s20, s13, s18
+; GCN-NEXT:    s_mul_i32 s18, s13, s18
 ; GCN-NEXT:    s_add_u32 s17, s17, s18
-; GCN-NEXT:    s_mul_hi_u32 s19, s15, s13
+; GCN-NEXT:    s_mul_hi_u32 s19, s13, s15
 ; GCN-NEXT:    s_addc_u32 s16, s16, s20
 ; GCN-NEXT:    s_addc_u32 s17, s19, 0
-; GCN-NEXT:    s_mul_i32 s13, s15, s13
-; GCN-NEXT:    s_add_u32 s13, s16, s13
+; GCN-NEXT:    s_mul_i32 s15, s13, s15
+; GCN-NEXT:    s_add_u32 s15, s16, s15
 ; GCN-NEXT:    s_addc_u32 s16, 0, s17
-; GCN-NEXT:    s_add_i32 s17, s12, s13
-; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT:    s_addc_u32 s15, s15, s16
-; GCN-NEXT:    s_mul_i32 s12, s3, s15
-; GCN-NEXT:    s_mul_hi_u32 s13, s3, s17
-; GCN-NEXT:    s_add_i32 s12, s13, s12
-; GCN-NEXT:    s_mul_i32 s14, s14, s17
-; GCN-NEXT:    s_add_i32 s12, s12, s14
-; GCN-NEXT:    s_mul_i32 s3, s3, s17
-; GCN-NEXT:    s_mul_hi_u32 s14, s15, s3
-; GCN-NEXT:    s_mul_i32 s16, s15, s3
-; GCN-NEXT:    s_mul_i32 s19, s17, s12
-; GCN-NEXT:    s_mul_hi_u32 s3, s17, s3
-; GCN-NEXT:    s_mul_hi_u32 s18, s17, s12
+; GCN-NEXT:    s_add_i32 s14, s14, s15
+; GCN-NEXT:    s_addc_u32 s13, s13, s16
+; GCN-NEXT:    s_mul_i32 s15, s3, s13
+; GCN-NEXT:    s_mul_hi_u32 s16, s3, s14
+; GCN-NEXT:    s_add_i32 s15, s16, s15
+; GCN-NEXT:    s_mul_i32 s12, s12, s14
+; GCN-NEXT:    s_add_i32 s15, s15, s12
+; GCN-NEXT:    s_mul_i32 s3, s3, s14
+; GCN-NEXT:    s_mul_hi_u32 s16, s13, s3
+; GCN-NEXT:    s_mul_i32 s17, s13, s3
+; GCN-NEXT:    s_mul_i32 s19, s14, s15
+; GCN-NEXT:    s_mul_hi_u32 s3, s14, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s14, s15
 ; GCN-NEXT:    s_add_u32 s3, s3, s19
 ; GCN-NEXT:    s_addc_u32 s18, 0, s18
-; GCN-NEXT:    s_add_u32 s3, s3, s16
-; GCN-NEXT:    s_mul_hi_u32 s13, s15, s12
-; GCN-NEXT:    s_addc_u32 s3, s18, s14
-; GCN-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-NEXT:    s_mul_i32 s12, s15, s12
-; GCN-NEXT:    s_add_u32 s3, s3, s12
-; GCN-NEXT:    s_addc_u32 s14, 0, s13
-; GCN-NEXT:    s_add_i32 s17, s17, s3
-; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[12:13], 0
-; GCN-NEXT:    s_addc_u32 s3, s15, s14
+; GCN-NEXT:    s_add_u32 s3, s3, s17
+; GCN-NEXT:    s_mul_hi_u32 s12, s13, s15
+; GCN-NEXT:    s_addc_u32 s3, s18, s16
+; GCN-NEXT:    s_addc_u32 s12, s12, 0
+; GCN-NEXT:    s_mul_i32 s15, s13, s15
+; GCN-NEXT:    s_add_u32 s3, s3, s15
+; GCN-NEXT:    s_addc_u32 s12, 0, s12
+; GCN-NEXT:    s_add_i32 s3, s14, s3
+; GCN-NEXT:    s_addc_u32 s16, s13, s12
 ; GCN-NEXT:    s_ashr_i32 s12, s5, 31
 ; GCN-NEXT:    s_add_u32 s14, s4, s12
 ; GCN-NEXT:    s_mov_b32 s13, s12
 ; GCN-NEXT:    s_addc_u32 s15, s5, s12
 ; GCN-NEXT:    s_xor_b64 s[14:15], s[14:15], s[12:13]
-; GCN-NEXT:    s_mul_i32 s16, s14, s3
-; GCN-NEXT:    s_mul_hi_u32 s18, s14, s17
-; GCN-NEXT:    s_mul_hi_u32 s5, s14, s3
-; GCN-NEXT:    s_add_u32 s16, s18, s16
+; GCN-NEXT:    s_mul_i32 s17, s14, s16
+; GCN-NEXT:    s_mul_hi_u32 s18, s14, s3
+; GCN-NEXT:    s_mul_hi_u32 s5, s14, s16
+; GCN-NEXT:    s_add_u32 s17, s18, s17
 ; GCN-NEXT:    s_addc_u32 s5, 0, s5
-; GCN-NEXT:    s_mul_hi_u32 s19, s15, s17
-; GCN-NEXT:    s_mul_i32 s17, s15, s17
-; GCN-NEXT:    s_add_u32 s16, s16, s17
-; GCN-NEXT:    s_mul_hi_u32 s18, s15, s3
-; GCN-NEXT:    s_addc_u32 s5, s5, s19
-; GCN-NEXT:    s_addc_u32 s16, s18, 0
+; GCN-NEXT:    s_mul_hi_u32 s19, s15, s3
 ; GCN-NEXT:    s_mul_i32 s3, s15, s3
-; GCN-NEXT:    s_add_u32 s3, s5, s3
-; GCN-NEXT:    s_addc_u32 s5, 0, s16
+; GCN-NEXT:    s_add_u32 s3, s17, s3
+; GCN-NEXT:    s_mul_hi_u32 s18, s15, s16
+; GCN-NEXT:    s_addc_u32 s3, s5, s19
+; GCN-NEXT:    s_addc_u32 s5, s18, 0
+; GCN-NEXT:    s_mul_i32 s16, s15, s16
+; GCN-NEXT:    s_add_u32 s3, s3, s16
+; GCN-NEXT:    s_addc_u32 s5, 0, s5
 ; GCN-NEXT:    s_mul_i32 s5, s10, s5
 ; GCN-NEXT:    s_mul_hi_u32 s16, s10, s3
 ; GCN-NEXT:    s_add_i32 s5, s16, s5
@@ -5507,11 +5447,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_mul_i32 s3, s10, s3
 ; GCN-NEXT:    s_sub_i32 s3, s14, s3
 ; GCN-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; GCN-NEXT:    s_subb_u32 s14, s18, s11
 ; GCN-NEXT:    s_sub_i32 s20, s3, s10
 ; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
 ; GCN-NEXT:    s_subb_u32 s21, s14, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s21, s11
 ; GCN-NEXT:    s_cselect_b32 s22, -1, 0
@@ -5521,12 +5459,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; GCN-NEXT:    s_cselect_b32 s22, s23, s22
 ; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
 ; GCN-NEXT:    s_subb_u32 s14, s14, s11
-; GCN-NEXT:    s_sub_i32 s23, s20, s10
-; GCN-NEXT:    s_cselect_b64 s[18:19], 1, 0
-; GCN-NEXT:    s_cmp_lg_u64 s[18:19], 0
+; GCN-NEXT:    s_sub_i32 s18, s20, s10
 ; GCN-NEXT:    s_subb_u32 s14, s14, 0
 ; GCN-NEXT:    s_cmp_lg_u32 s22, 0
-; GCN-NEXT:    s_cselect_b32 s18, s23, s20
+; GCN-NEXT:    s_cselect_b32 s18, s18, s20
 ; GCN-NEXT:    s_cselect_b32 s14, s14, s21
 ; GCN-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; GCN-NEXT:    s_subb_u32 s5, s15, s5
@@ -6299,11 +6235,9 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    v_readfirstlane_b32 s14, v8
 ; TONGA-NEXT:    s_sub_i32 s12, s12, s14
 ; TONGA-NEXT:    s_cselect_b64 s[14:15], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; TONGA-NEXT:    s_subb_u32 s1, s1, s7
 ; TONGA-NEXT:    s_sub_i32 s18, s12, s6
 ; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; TONGA-NEXT:    s_subb_u32 s19, s1, 0
 ; TONGA-NEXT:    s_cmp_ge_u32 s19, s7
 ; TONGA-NEXT:    s_cselect_b32 s20, -1, 0
@@ -6313,12 +6247,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
 ; TONGA-NEXT:    s_cselect_b32 s20, s21, s20
 ; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
 ; TONGA-NEXT:    s_subb_u32 s1, s1, s7
-; TONGA-NEXT:    s_sub_i32 s21, s18, s6
-; TONGA-NEXT:    s_cselect_b64 s[16:17], 1, 0
-; TONGA-NEXT:    s_cmp_lg_u64 s[16:17], 0
+; TONGA-NEXT:    s_sub_i32 s16, s18, s6
 ; TONGA-NEXT:    s_subb_u32 s1, s1, 0
 ; TONGA-NEXT:    s_cmp_lg_u32 s20, 0
-; TONGA-NEXT:    s_cselect_b32 s16, s21, s18
+; TONGA-NEXT:    s_cselect_b32 s16, s16, s18
 ; TONGA-NEXT:    s_cselect_b32 s1, s1, s19
 ; TONGA-NEXT:    s_cmp_lg_u64 s[14:15], 0
 ; TONGA-NEXT:    s_subb_u32 s3, s13, s3
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 03bb85ec0e91a..908e8b75de9e8 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -8,12 +8,11 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GCN-NEXT:    s_sub_u32 s10, 0, s8
-; GCN-NEXT:    s_subb_u32 s11, 0, s9
+; GCN-NEXT:    s_sub_u32 s0, 0, s8
+; GCN-NEXT:    s_subb_u32 s1, 0, s9
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -22,71 +21,65 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s10, v0
-; GCN-NEXT:    v_readfirstlane_b32 s12, v1
-; GCN-NEXT:    v_readfirstlane_b32 s0, v0
-; GCN-NEXT:    s_mul_i32 s1, s10, s12
-; GCN-NEXT:    v_readfirstlane_b32 s15, v2
-; GCN-NEXT:    s_mul_i32 s13, s11, s0
-; GCN-NEXT:    s_mul_i32 s14, s10, s0
-; GCN-NEXT:    s_add_i32 s1, s15, s1
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s14
-; GCN-NEXT:    s_add_i32 s1, s1, s13
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s1
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s14
-; GCN-NEXT:    v_readfirstlane_b32 s13, v3
-; GCN-NEXT:    s_mul_i32 s15, s0, s1
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s1
-; GCN-NEXT:    s_add_u32 s13, s13, s15
+; GCN-NEXT:    v_mul_hi_u32 v2, s0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    v_readfirstlane_b32 s2, v0
+; GCN-NEXT:    s_mul_i32 s11, s0, s10
+; GCN-NEXT:    v_readfirstlane_b32 s14, v2
+; GCN-NEXT:    s_mul_i32 s12, s1, s2
+; GCN-NEXT:    s_mul_i32 s13, s0, s2
+; GCN-NEXT:    s_add_i32 s11, s14, s11
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s13
+; GCN-NEXT:    s_add_i32 s11, s11, s12
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s11
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s13
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_mul_i32 s15, s2, s11
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s11
+; GCN-NEXT:    s_add_u32 s12, s12, s15
 ; GCN-NEXT:    v_readfirstlane_b32 s15, v0
-; GCN-NEXT:    s_mul_i32 s14, s12, s14
+; GCN-NEXT:    s_mul_i32 s13, s10, s13
 ; GCN-NEXT:    s_addc_u32 s15, 0, s15
-; GCN-NEXT:    v_readfirstlane_b32 s16, v4
-; GCN-NEXT:    s_add_u32 s13, s13, s14
-; GCN-NEXT:    s_addc_u32 s13, s15, s16
-; GCN-NEXT:    v_readfirstlane_b32 s14, v1
-; GCN-NEXT:    s_addc_u32 s14, s14, 0
-; GCN-NEXT:    s_mul_i32 s1, s12, s1
-; GCN-NEXT:    s_add_u32 s1, s13, s1
-; GCN-NEXT:    s_addc_u32 s13, 0, s14
-; GCN-NEXT:    s_add_i32 s14, s0, s1
-; GCN-NEXT:    v_mov_b32_e32 v0, s14
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_addc_u32 s12, s12, s13
-; GCN-NEXT:    s_mul_i32 s0, s10, s12
-; GCN-NEXT:    v_readfirstlane_b32 s1, v0
-; GCN-NEXT:    s_add_i32 s0, s1, s0
-; GCN-NEXT:    s_mul_i32 s11, s11, s14
-; GCN-NEXT:    s_mul_i32 s1, s10, s14
-; GCN-NEXT:    s_add_i32 s0, s0, s11
-; GCN-NEXT:    v_mov_b32_e32 v2, s1
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mul_hi_u32 v3, s12, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s14, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s12, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s14, v0
-; GCN-NEXT:    s_mul_i32 s11, s14, s0
-; GCN-NEXT:    v_readfirstlane_b32 s15, v2
-; GCN-NEXT:    s_add_u32 s11, s15, s11
+; GCN-NEXT:    v_readfirstlane_b32 s14, v4
+; GCN-NEXT:    s_add_u32 s12, s12, s13
+; GCN-NEXT:    s_addc_u32 s12, s15, s14
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-NEXT:    s_mul_i32 s11, s10, s11
+; GCN-NEXT:    s_add_u32 s11, s12, s11
+; GCN-NEXT:    s_addc_u32 s12, 0, s13
+; GCN-NEXT:    s_add_i32 s11, s2, s11
+; GCN-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NEXT:    v_mul_hi_u32 v0, s0, v0
+; GCN-NEXT:    s_addc_u32 s10, s10, s12
+; GCN-NEXT:    s_mul_i32 s12, s0, s10
+; GCN-NEXT:    s_mul_i32 s1, s1, s11
 ; GCN-NEXT:    v_readfirstlane_b32 s13, v0
-; GCN-NEXT:    s_mul_i32 s1, s12, s1
-; GCN-NEXT:    s_addc_u32 s13, 0, s13
-; GCN-NEXT:    v_readfirstlane_b32 s10, v3
-; GCN-NEXT:    s_add_u32 s1, s11, s1
-; GCN-NEXT:    s_addc_u32 s1, s13, s10
-; GCN-NEXT:    v_readfirstlane_b32 s10, v1
-; GCN-NEXT:    s_addc_u32 s10, s10, 0
-; GCN-NEXT:    s_mul_i32 s0, s12, s0
-; GCN-NEXT:    s_add_u32 s0, s1, s0
-; GCN-NEXT:    s_addc_u32 s10, 0, s10
-; GCN-NEXT:    s_add_i32 s11, s14, s0
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_addc_u32 s1, s12, s10
+; GCN-NEXT:    s_add_i32 s12, s13, s12
+; GCN-NEXT:    s_mul_i32 s0, s0, s11
+; GCN-NEXT:    s_add_i32 s1, s12, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    s_mul_i32 s13, s11, s1
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_add_u32 s13, s15, s13
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_mul_i32 s0, s10, s0
+; GCN-NEXT:    s_addc_u32 s14, 0, s14
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_add_u32 s0, s13, s0
+; GCN-NEXT:    s_addc_u32 s0, s14, s12
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    s_addc_u32 s12, s12, 0
+; GCN-NEXT:    s_mul_i32 s1, s10, s1
+; GCN-NEXT:    s_add_u32 s0, s0, s1
+; GCN-NEXT:    s_addc_u32 s1, 0, s12
+; GCN-NEXT:    s_add_i32 s11, s11, s0
+; GCN-NEXT:    s_addc_u32 s1, s10, s1
 ; GCN-NEXT:    v_mov_b32_e32 v0, s1
 ; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
 ; GCN-NEXT:    v_mov_b32_e32 v2, s11
@@ -115,46 +108,42 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-NEXT:    v_readfirstlane_b32 s10, v0
 ; GCN-NEXT:    s_add_i32 s5, s10, s5
 ; GCN-NEXT:    s_mul_i32 s10, s9, s4
-; GCN-NEXT:    s_add_i32 s10, s5, s10
-; GCN-NEXT:    s_sub_i32 s11, s7, s10
+; GCN-NEXT:    s_add_i32 s12, s5, s10
+; GCN-NEXT:    s_sub_i32 s10, s7, s12
 ; GCN-NEXT:    s_mul_i32 s4, s8, s4
 ; GCN-NEXT:    s_sub_i32 s6, s6, s4
 ; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_or_b32 s12, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s12, 0
-; GCN-NEXT:    s_subb_u32 s11, s11, s9
-; GCN-NEXT:    s_sub_i32 s13, s6, s8
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_subb_u32 s13, s10, s9
+; GCN-NEXT:    s_sub_i32 s14, s6, s8
+; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_subb_u32 s15, s13, 0
+; GCN-NEXT:    s_cmp_ge_u32 s15, s9
+; GCN-NEXT:    s_cselect_b32 s16, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s14, s8
+; GCN-NEXT:    s_cselect_b32 s17, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s15, s9
+; GCN-NEXT:    s_cselect_b32 s16, s17, s16
+; GCN-NEXT:    s_or_b32 s10, s10, s11
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_subb_u32 s10, s13, s9
+; GCN-NEXT:    s_sub_i32 s11, s14, s8
+; GCN-NEXT:    s_subb_u32 s10, s10, 0
+; GCN-NEXT:    s_cmp_lg_u32 s16, 0
+; GCN-NEXT:    s_cselect_b32 s11, s11, s14
+; GCN-NEXT:    s_cselect_b32 s10, s10, s15
 ; GCN-NEXT:    s_or_b32 s4, s4, s5
 ; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_subb_u32 s14, s11, 0
-; GCN-NEXT:    s_cmp_ge_u32 s14, s9
+; GCN-NEXT:    s_subb_u32 s4, s7, s12
+; GCN-NEXT:    s_cmp_ge_u32 s4, s9
 ; GCN-NEXT:    s_cselect_b32 s5, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s13, s8
-; GCN-NEXT:    s_cselect_b32 s15, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s14, s9
-; GCN-NEXT:    s_cselect_b32 s15, s15, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_subb_u32 s11, s11, s9
-; GCN-NEXT:    s_sub_i32 s16, s13, s8
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_or_b32 s4, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_subb_u32 s4, s11, 0
-; GCN-NEXT:    s_cmp_lg_u32 s15, 0
-; GCN-NEXT:    s_cselect_b32 s5, s16, s13
-; GCN-NEXT:    s_cselect_b32 s4, s4, s14
-; GCN-NEXT:    s_cmp_lg_u32 s12, 0
-; GCN-NEXT:    s_subb_u32 s7, s7, s10
-; GCN-NEXT:    s_cmp_ge_u32 s7, s9
-; GCN-NEXT:    s_cselect_b32 s10, -1, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s6, s8
-; GCN-NEXT:    s_cselect_b32 s8, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s7, s9
-; GCN-NEXT:    s_cselect_b32 s8, s8, s10
-; GCN-NEXT:    s_cmp_lg_u32 s8, 0
-; GCN-NEXT:    s_cselect_b32 s4, s4, s7
-; GCN-NEXT:    s_cselect_b32 s5, s5, s6
+; GCN-NEXT:    s_cselect_b32 s7, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s4, s9
+; GCN-NEXT:    s_cselect_b32 s5, s7, s5
+; GCN-NEXT:    s_cmp_lg_u32 s5, 0
+; GCN-NEXT:    s_cselect_b32 s4, s10, s4
+; GCN-NEXT:    s_cselect_b32 s5, s11, s6
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_mov_b32_e32 v0, s5
 ; GCN-NEXT:    v_mov_b32_e32 v1, s4
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -170,35 +159,35 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT:    s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT:    s_flbit_i32_b64 s16, s[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
 ; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT:    s_add_u32 s16, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT:    s_add_u32 s14, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s4, s18
-; GCN-IR-NEXT:    s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s4, s16
+; GCN-IR-NEXT:    s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -206,19 +195,19 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT:    s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s4, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT:    s_add_i32 s16, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -969,83 +958,76 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-NEXT:    s_xor_b64 s[4:5], s[2:3], s[4:5]
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
-; GCN-NEXT:    s_sub_u32 s10, 0, s4
-; GCN-NEXT:    s_subb_u32 s11, 0, s5
+; GCN-NEXT:    s_sub_u32 s8, 0, s4
+; GCN-NEXT:    s_subb_u32 s9, 0, s5
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
-; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v1, v1
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s10, v0
-; GCN-NEXT:    v_readfirstlane_b32 s12, v1
-; GCN-NEXT:    v_readfirstlane_b32 s8, v0
-; GCN-NEXT:    s_mul_i32 s9, s10, s12
-; GCN-NEXT:    v_readfirstlane_b32 s15, v2
-; GCN-NEXT:    s_mul_i32 s13, s11, s8
-; GCN-NEXT:    s_mul_i32 s14, s10, s8
-; GCN-NEXT:    s_add_i32 s9, s15, s9
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s14
-; GCN-NEXT:    s_add_i32 s9, s9, s13
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s9
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s14
-; GCN-NEXT:    v_readfirstlane_b32 s13, v3
-; GCN-NEXT:    s_mul_i32 s15, s8, s9
-; GCN-NEXT:    s_add_u32 s13, s13, s15
-; GCN-NEXT:    v_readfirstlane_b32 s15, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, v1, s9
-; GCN-NEXT:    s_addc_u32 s15, 0, s15
-; GCN-NEXT:    s_mul_i32 s14, s12, s14
-; GCN-NEXT:    v_readfirstlane_b32 s16, v4
-; GCN-NEXT:    s_add_u32 s13, s13, s14
-; GCN-NEXT:    s_addc_u32 s13, s15, s16
+; GCN-NEXT:    v_mul_hi_u32 v2, s8, v0
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    v_readfirstlane_b32 s2, v0
+; GCN-NEXT:    s_mul_i32 s11, s8, s10
+; GCN-NEXT:    v_readfirstlane_b32 s14, v2
+; GCN-NEXT:    s_mul_i32 s12, s9, s2
+; GCN-NEXT:    s_mul_i32 s13, s8, s2
+; GCN-NEXT:    s_add_i32 s11, s14, s11
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s13
+; GCN-NEXT:    s_add_i32 s11, s11, s12
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s11
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s13
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_mul_i32 s14, s2, s11
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s11
+; GCN-NEXT:    s_add_u32 s12, s12, s14
 ; GCN-NEXT:    v_readfirstlane_b32 s14, v0
-; GCN-NEXT:    s_addc_u32 s14, s14, 0
-; GCN-NEXT:    s_mul_i32 s9, s12, s9
-; GCN-NEXT:    s_add_u32 s9, s13, s9
-; GCN-NEXT:    s_addc_u32 s13, 0, s14
-; GCN-NEXT:    s_add_i32 s14, s8, s9
-; GCN-NEXT:    v_mov_b32_e32 v0, s14
-; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
-; GCN-NEXT:    s_or_b32 s8, s8, s9
-; GCN-NEXT:    s_cmp_lg_u32 s8, 0
-; GCN-NEXT:    s_addc_u32 s12, s12, s13
-; GCN-NEXT:    s_mul_i32 s8, s10, s12
-; GCN-NEXT:    v_readfirstlane_b32 s9, v0
-; GCN-NEXT:    s_add_i32 s8, s9, s8
-; GCN-NEXT:    s_mul_i32 s11, s11, s14
-; GCN-NEXT:    s_mul_i32 s9, s10, s14
-; GCN-NEXT:    s_add_i32 s8, s8, s11
-; GCN-NEXT:    v_mov_b32_e32 v2, s9
-; GCN-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-NEXT:    v_mul_hi_u32 v3, s12, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s14, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s12, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s14, v0
-; GCN-NEXT:    s_mul_i32 s11, s14, s8
-; GCN-NEXT:    v_readfirstlane_b32 s15, v2
-; GCN-NEXT:    s_add_u32 s11, s15, s11
+; GCN-NEXT:    s_addc_u32 s14, 0, s14
+; GCN-NEXT:    s_mul_i32 s13, s10, s13
+; GCN-NEXT:    v_readfirstlane_b32 s15, v4
+; GCN-NEXT:    s_add_u32 s12, s12, s13
+; GCN-NEXT:    s_addc_u32 s12, s14, s15
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-NEXT:    s_mul_i32 s11, s10, s11
+; GCN-NEXT:    s_add_u32 s11, s12, s11
+; GCN-NEXT:    s_addc_u32 s12, 0, s13
+; GCN-NEXT:    s_add_i32 s11, s2, s11
+; GCN-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GCN-NEXT:    s_addc_u32 s10, s10, s12
+; GCN-NEXT:    s_mul_i32 s12, s8, s10
+; GCN-NEXT:    s_mul_i32 s9, s9, s11
 ; GCN-NEXT:    v_readfirstlane_b32 s13, v0
-; GCN-NEXT:    s_mul_i32 s9, s12, s9
-; GCN-NEXT:    s_addc_u32 s13, 0, s13
-; GCN-NEXT:    v_readfirstlane_b32 s10, v3
-; GCN-NEXT:    s_add_u32 s9, s11, s9
-; GCN-NEXT:    s_addc_u32 s9, s13, s10
-; GCN-NEXT:    v_readfirstlane_b32 s10, v1
-; GCN-NEXT:    s_addc_u32 s10, s10, 0
-; GCN-NEXT:    s_mul_i32 s8, s12, s8
-; GCN-NEXT:    s_add_u32 s8, s9, s8
-; GCN-NEXT:    s_addc_u32 s10, 0, s10
-; GCN-NEXT:    s_add_i32 s11, s14, s8
-; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GCN-NEXT:    s_or_b32 s8, s8, s9
-; GCN-NEXT:    s_cmp_lg_u32 s8, 0
-; GCN-NEXT:    s_addc_u32 s10, s12, s10
+; GCN-NEXT:    s_add_i32 s12, s13, s12
+; GCN-NEXT:    s_mul_i32 s8, s8, s11
+; GCN-NEXT:    s_add_i32 s9, s12, s9
+; GCN-NEXT:    v_mov_b32_e32 v2, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s9
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    s_mul_i32 s13, s11, s9
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_add_u32 s13, s15, s13
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_mul_i32 s8, s10, s8
+; GCN-NEXT:    s_addc_u32 s14, 0, s14
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_add_u32 s8, s13, s8
+; GCN-NEXT:    s_addc_u32 s8, s14, s12
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    s_addc_u32 s12, s12, 0
+; GCN-NEXT:    s_mul_i32 s9, s10, s9
+; GCN-NEXT:    s_add_u32 s8, s8, s9
+; GCN-NEXT:    s_addc_u32 s9, 0, s12
+; GCN-NEXT:    s_add_i32 s11, s11, s8
+; GCN-NEXT:    s_addc_u32 s10, s10, s9
 ; GCN-NEXT:    s_ashr_i32 s8, s7, 31
 ; GCN-NEXT:    s_add_u32 s6, s6, s8
 ; GCN-NEXT:    s_mov_b32 s9, s8
@@ -1074,49 +1056,45 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
 ; GCN-NEXT:    s_addc_u32 s11, 0, s12
 ; GCN-NEXT:    s_mul_i32 s11, s4, s11
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_readfirstlane_b32 s12, v0
 ; GCN-NEXT:    s_add_i32 s11, s12, s11
 ; GCN-NEXT:    s_mul_i32 s12, s5, s10
-; GCN-NEXT:    s_add_i32 s12, s11, s12
-; GCN-NEXT:    s_sub_i32 s13, s7, s12
+; GCN-NEXT:    s_add_i32 s14, s11, s12
+; GCN-NEXT:    s_sub_i32 s12, s7, s14
 ; GCN-NEXT:    s_mul_i32 s10, s4, s10
 ; GCN-NEXT:    s_sub_i32 s6, s6, s10
 ; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GCN-NEXT:    s_or_b32 s14, s10, s11
-; GCN-NEXT:    s_cmp_lg_u32 s14, 0
-; GCN-NEXT:    s_subb_u32 s13, s13, s5
-; GCN-NEXT:    s_sub_i32 s15, s6, s4
-; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
-; GCN-NEXT:    s_or_b32 s10, s10, s11
-; GCN-NEXT:    s_cmp_lg_u32 s10, 0
-; GCN-NEXT:    s_subb_u32 s16, s13, 0
-; GCN-NEXT:    s_cmp_ge_u32 s16, s5
-; GCN-NEXT:    s_cselect_b32 s11, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s15, s4
-; GCN-NEXT:    s_cselect_b32 s17, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s16, s5
-; GCN-NEXT:    s_cselect_b32 s17, s17, s11
-; GCN-NEXT:    s_cmp_lg_u32 s10, 0
-; GCN-NEXT:    s_subb_u32 s13, s13, s5
-; GCN-NEXT:    s_sub_i32 s18, s15, s4
-; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_subb_u32 s15, s12, s5
+; GCN-NEXT:    s_sub_i32 s16, s6, s4
+; GCN-NEXT:    s_cselect_b64 s[12:13], 1, 0
+; GCN-NEXT:    s_subb_u32 s17, s15, 0
+; GCN-NEXT:    s_cmp_ge_u32 s17, s5
+; GCN-NEXT:    s_cselect_b32 s18, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s16, s4
+; GCN-NEXT:    s_cselect_b32 s19, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s17, s5
+; GCN-NEXT:    s_cselect_b32 s18, s19, s18
+; GCN-NEXT:    s_or_b32 s12, s12, s13
+; GCN-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-NEXT:    s_subb_u32 s12, s15, s5
+; GCN-NEXT:    s_sub_i32 s13, s16, s4
+; GCN-NEXT:    s_subb_u32 s12, s12, 0
+; GCN-NEXT:    s_cmp_lg_u32 s18, 0
+; GCN-NEXT:    s_cselect_b32 s13, s13, s16
+; GCN-NEXT:    s_cselect_b32 s12, s12, s17
 ; GCN-NEXT:    s_or_b32 s10, s10, s11
 ; GCN-NEXT:    s_cmp_lg_u32 s10, 0
-; GCN-NEXT:    s_subb_u32 s10, s13, 0
-; GCN-NEXT:    s_cmp_lg_u32 s17, 0
-; GCN-NEXT:    s_cselect_b32 s11, s18, s15
-; GCN-NEXT:    s_cselect_b32 s10, s10, s16
-; GCN-NEXT:    s_cmp_lg_u32 s14, 0
-; GCN-NEXT:    s_subb_u32 s7, s7, s12
+; GCN-NEXT:    s_subb_u32 s7, s7, s14
 ; GCN-NEXT:    s_cmp_ge_u32 s7, s5
-; GCN-NEXT:    s_cselect_b32 s12, -1, 0
+; GCN-NEXT:    s_cselect_b32 s10, -1, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s6, s4
 ; GCN-NEXT:    s_cselect_b32 s4, -1, 0
 ; GCN-NEXT:    s_cmp_eq_u32 s7, s5
-; GCN-NEXT:    s_cselect_b32 s4, s4, s12
+; GCN-NEXT:    s_cselect_b32 s4, s4, s10
 ; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_cselect_b32 s5, s10, s7
-; GCN-NEXT:    s_cselect_b32 s4, s11, s6
+; GCN-NEXT:    s_cselect_b32 s5, s12, s7
+; GCN-NEXT:    s_cselect_b32 s4, s13, s6
 ; GCN-NEXT:    s_xor_b64 s[4:5], s[4:5], s[8:9]
 ; GCN-NEXT:    s_sub_u32 s4, s4, s8
 ; GCN-NEXT:    s_subb_u32 s5, s5, s8
@@ -1147,35 +1125,35 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[8:9], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[2:3], s[10:11]
-; GCN-IR-NEXT:    s_flbit_i32_b64 s20, s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s20
+; GCN-IR-NEXT:    s_flbit_i32_b64 s18, s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s18
 ; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[16:17]
 ; GCN-IR-NEXT:    s_and_b64 s[10:11], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s7
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s6
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[20:21]
 ; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s14, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s15, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
+; GCN-IR-NEXT:    s_add_i32 s16, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s10, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[6:7], s14
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[6:7], s16
-; GCN-IR-NEXT:    s_add_u32 s18, s8, -1
-; GCN-IR-NEXT:    s_addc_u32 s19, s9, -1
+; GCN-IR-NEXT:    s_add_u32 s16, s8, -1
+; GCN-IR-NEXT:    s_addc_u32 s17, s9, -1
 ; GCN-IR-NEXT:    s_not_b64 s[2:3], s[12:13]
-; GCN-IR-NEXT:    s_add_u32 s12, s2, s20
-; GCN-IR-NEXT:    s_addc_u32 s13, s3, 0
-; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s18, s2, s18
+; GCN-IR-NEXT:    s_addc_u32 s19, s3, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1183,19 +1161,19 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_lshr_b32 s2, s11, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[16:17], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s2, s18, s14
-; GCN-IR-NEXT:    s_subb_u32 s2, s19, s15
-; GCN-IR-NEXT:    s_ashr_i32 s16, s2, 31
-; GCN-IR-NEXT:    s_mov_b32 s17, s16
-; GCN-IR-NEXT:    s_and_b32 s2, s16, 1
-; GCN-IR-NEXT:    s_and_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
-; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
-; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s2, s16, s14
+; GCN-IR-NEXT:    s_subb_u32 s2, s17, s15
+; GCN-IR-NEXT:    s_ashr_i32 s12, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s2, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[12:13], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s20
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s21
+; GCN-IR-NEXT:    s_add_i32 s18, s18, 1
+; GCN-IR-NEXT:    s_addc_u32 s19, s19, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[2:3]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_3
 ; GCN-IR-NEXT:  .LBB8_4: ; %Flow7
@@ -1320,8 +1298,7 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s4
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s5
 ; GCN-NEXT:    s_sub_u32 s2, 0, s4
-; GCN-NEXT:    s_subb_u32 s8, 0, s5
-; GCN-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-NEXT:    s_subb_u32 s6, 0, s5
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -1331,74 +1308,68 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GCN-NEXT:    v_mul_hi_u32 v2, s2, v0
-; GCN-NEXT:    v_readfirstlane_b32 s9, v1
-; GCN-NEXT:    v_readfirstlane_b32 s6, v0
-; GCN-NEXT:    s_mul_i32 s7, s2, s9
-; GCN-NEXT:    v_readfirstlane_b32 s12, v2
-; GCN-NEXT:    s_mul_i32 s10, s8, s6
-; GCN-NEXT:    s_mul_i32 s11, s2, s6
-; GCN-NEXT:    s_add_i32 s7, s12, s7
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s11
-; GCN-NEXT:    s_add_i32 s7, s7, s10
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s7
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s11
-; GCN-NEXT:    v_readfirstlane_b32 s10, v3
-; GCN-NEXT:    s_mul_i32 s13, s6, s7
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s7
-; GCN-NEXT:    s_add_u32 s10, s10, s13
-; GCN-NEXT:    v_readfirstlane_b32 s13, v0
-; GCN-NEXT:    s_mul_i32 s11, s9, s11
-; GCN-NEXT:    s_addc_u32 s13, 0, s13
-; GCN-NEXT:    v_readfirstlane_b32 s12, v4
-; GCN-NEXT:    s_add_u32 s10, s10, s11
-; GCN-NEXT:    s_addc_u32 s10, s13, s12
-; GCN-NEXT:    v_readfirstlane_b32 s11, v1
-; GCN-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-NEXT:    s_mul_i32 s7, s9, s7
-; GCN-NEXT:    s_add_u32 s7, s10, s7
-; GCN-NEXT:    s_addc_u32 s10, 0, s11
-; GCN-NEXT:    s_add_i32 s11, s6, s7
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GCN-NEXT:    v_readfirstlane_b32 s7, v1
+; GCN-NEXT:    v_readfirstlane_b32 s3, v0
+; GCN-NEXT:    s_mul_i32 s8, s2, s7
+; GCN-NEXT:    v_readfirstlane_b32 s11, v2
+; GCN-NEXT:    s_mul_i32 s9, s6, s3
+; GCN-NEXT:    s_mul_i32 s10, s2, s3
+; GCN-NEXT:    s_add_i32 s8, s11, s8
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s10
+; GCN-NEXT:    s_add_i32 s8, s8, s9
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s10
+; GCN-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s8
+; GCN-NEXT:    s_mul_i32 s12, s3, s8
+; GCN-NEXT:    s_add_u32 s9, s9, s12
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_mul_i32 s10, s7, s10
+; GCN-NEXT:    s_addc_u32 s12, 0, s12
+; GCN-NEXT:    v_readfirstlane_b32 s11, v4
+; GCN-NEXT:    s_add_u32 s9, s9, s10
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    s_addc_u32 s9, s12, s11
+; GCN-NEXT:    s_addc_u32 s10, s13, 0
+; GCN-NEXT:    s_mul_i32 s8, s7, s8
+; GCN-NEXT:    s_add_u32 s8, s9, s8
+; GCN-NEXT:    s_addc_u32 s9, 0, s10
+; GCN-NEXT:    s_add_i32 s8, s3, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-NEXT:    v_mul_hi_u32 v0, s2, v0
-; GCN-NEXT:    s_or_b32 s6, s6, s7
-; GCN-NEXT:    s_cmp_lg_u32 s6, 0
-; GCN-NEXT:    s_addc_u32 s9, s9, s10
-; GCN-NEXT:    s_mul_i32 s6, s2, s9
-; GCN-NEXT:    v_readfirstlane_b32 s7, v0
-; GCN-NEXT:    s_add_i32 s6, s7, s6
-; GCN-NEXT:    s_mul_i32 s8, s8, s11
-; GCN-NEXT:    s_mul_i32 s2, s2, s11
-; GCN-NEXT:    s_add_i32 s6, s6, s8
+; GCN-NEXT:    s_addc_u32 s7, s7, s9
+; GCN-NEXT:    s_mul_i32 s9, s2, s7
+; GCN-NEXT:    s_mul_i32 s6, s6, s8
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_add_i32 s9, s10, s9
+; GCN-NEXT:    s_mul_i32 s2, s2, s8
+; GCN-NEXT:    s_add_i32 s6, s9, s6
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
 ; GCN-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-NEXT:    v_mul_hi_u32 v3, s9, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s9, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    s_mul_i32 s8, s11, s6
+; GCN-NEXT:    v_mul_hi_u32 v3, s7, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s8, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s7, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GCN-NEXT:    s_mul_i32 s10, s8, s6
 ; GCN-NEXT:    v_readfirstlane_b32 s12, v2
-; GCN-NEXT:    s_add_u32 s8, s12, s8
-; GCN-NEXT:    v_readfirstlane_b32 s10, v0
-; GCN-NEXT:    s_mul_i32 s2, s9, s2
-; GCN-NEXT:    s_addc_u32 s10, 0, s10
-; GCN-NEXT:    v_readfirstlane_b32 s7, v3
-; GCN-NEXT:    s_add_u32 s2, s8, s2
-; GCN-NEXT:    s_addc_u32 s2, s10, s7
-; GCN-NEXT:    v_readfirstlane_b32 s7, v1
-; GCN-NEXT:    s_addc_u32 s7, s7, 0
-; GCN-NEXT:    s_mul_i32 s6, s9, s6
+; GCN-NEXT:    s_add_u32 s10, s12, s10
+; GCN-NEXT:    v_readfirstlane_b32 s11, v0
+; GCN-NEXT:    s_mul_i32 s2, s7, s2
+; GCN-NEXT:    s_addc_u32 s11, 0, s11
+; GCN-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NEXT:    s_add_u32 s2, s10, s2
+; GCN-NEXT:    s_addc_u32 s2, s11, s9
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_mul_i32 s6, s7, s6
 ; GCN-NEXT:    s_add_u32 s2, s2, s6
-; GCN-NEXT:    s_addc_u32 s8, 0, s7
-; GCN-NEXT:    s_add_i32 s11, s11, s2
-; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GCN-NEXT:    s_or_b32 s2, s6, s7
-; GCN-NEXT:    s_cmp_lg_u32 s2, 0
-; GCN-NEXT:    s_addc_u32 s6, s9, s8
-; GCN-NEXT:    v_mul_hi_u32 v1, s11, 24
+; GCN-NEXT:    s_addc_u32 s6, 0, s9
+; GCN-NEXT:    s_add_i32 s8, s8, s2
+; GCN-NEXT:    s_addc_u32 s6, s7, s6
+; GCN-NEXT:    v_mul_hi_u32 v1, s8, 24
 ; GCN-NEXT:    v_mul_hi_u32 v0, s6, 24
 ; GCN-NEXT:    s_mul_i32 s6, s6, 24
-; GCN-NEXT:    s_mov_b32 s2, -1
+; GCN-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-NEXT:    v_readfirstlane_b32 s8, v1
 ; GCN-NEXT:    v_readfirstlane_b32 s7, v0
 ; GCN-NEXT:    s_add_u32 s6, s8, s6
@@ -1407,46 +1378,42 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
 ; GCN-NEXT:    s_mul_i32 s7, s5, s6
 ; GCN-NEXT:    s_mul_i32 s6, s4, s6
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_readfirstlane_b32 s8, v0
-; GCN-NEXT:    s_add_i32 s8, s8, s7
-; GCN-NEXT:    s_sub_i32 s9, 0, s8
-; GCN-NEXT:    s_sub_i32 s10, 24, s6
-; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GCN-NEXT:    s_or_b32 s11, s6, s7
-; GCN-NEXT:    s_cmp_lg_u32 s11, 0
-; GCN-NEXT:    s_subb_u32 s9, s9, s5
-; GCN-NEXT:    s_sub_i32 s12, s10, s4
+; GCN-NEXT:    s_add_i32 s10, s8, s7
+; GCN-NEXT:    s_sub_i32 s8, 0, s10
+; GCN-NEXT:    s_sub_i32 s11, 24, s6
 ; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
+; GCN-NEXT:    s_subb_u32 s12, s8, s5
+; GCN-NEXT:    s_sub_i32 s13, s11, s4
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    s_subb_u32 s14, s12, 0
+; GCN-NEXT:    s_cmp_ge_u32 s14, s5
+; GCN-NEXT:    s_cselect_b32 s15, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s4
+; GCN-NEXT:    s_cselect_b32 s16, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s14, s5
+; GCN-NEXT:    s_cselect_b32 s15, s16, s15
+; GCN-NEXT:    s_or_b32 s8, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_subb_u32 s8, s12, s5
+; GCN-NEXT:    s_sub_i32 s9, s13, s4
+; GCN-NEXT:    s_subb_u32 s8, s8, 0
+; GCN-NEXT:    s_cmp_lg_u32 s15, 0
+; GCN-NEXT:    s_cselect_b32 s9, s9, s13
+; GCN-NEXT:    s_cselect_b32 s8, s8, s14
 ; GCN-NEXT:    s_or_b32 s6, s6, s7
 ; GCN-NEXT:    s_cmp_lg_u32 s6, 0
-; GCN-NEXT:    s_subb_u32 s13, s9, 0
-; GCN-NEXT:    s_cmp_ge_u32 s13, s5
+; GCN-NEXT:    s_subb_u32 s6, 0, s10
+; GCN-NEXT:    s_cmp_ge_u32 s6, s5
 ; GCN-NEXT:    s_cselect_b32 s7, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s12, s4
-; GCN-NEXT:    s_cselect_b32 s14, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s13, s5
-; GCN-NEXT:    s_cselect_b32 s14, s14, s7
-; GCN-NEXT:    s_cmp_lg_u32 s6, 0
-; GCN-NEXT:    s_subb_u32 s9, s9, s5
-; GCN-NEXT:    s_sub_i32 s15, s12, s4
-; GCN-NEXT:    s_cselect_b64 s[6:7], 1, 0
-; GCN-NEXT:    s_or_b32 s6, s6, s7
-; GCN-NEXT:    s_cmp_lg_u32 s6, 0
-; GCN-NEXT:    s_subb_u32 s6, s9, 0
-; GCN-NEXT:    s_cmp_lg_u32 s14, 0
-; GCN-NEXT:    s_cselect_b32 s7, s15, s12
-; GCN-NEXT:    s_cselect_b32 s6, s6, s13
-; GCN-NEXT:    s_cmp_lg_u32 s11, 0
-; GCN-NEXT:    s_subb_u32 s8, 0, s8
-; GCN-NEXT:    s_cmp_ge_u32 s8, s5
-; GCN-NEXT:    s_cselect_b32 s9, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s10, s4
+; GCN-NEXT:    s_cmp_ge_u32 s11, s4
 ; GCN-NEXT:    s_cselect_b32 s4, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s8, s5
-; GCN-NEXT:    s_cselect_b32 s4, s4, s9
+; GCN-NEXT:    s_cmp_eq_u32 s6, s5
+; GCN-NEXT:    s_cselect_b32 s4, s4, s7
 ; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_cselect_b32 s4, s6, s8
-; GCN-NEXT:    s_cselect_b32 s5, s7, s10
+; GCN-NEXT:    s_cselect_b32 s4, s8, s6
+; GCN-NEXT:    s_cselect_b32 s5, s9, s11
 ; GCN-NEXT:    v_mov_b32_e32 v0, s5
 ; GCN-NEXT:    v_mov_b32_e32 v1, s4
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -1462,34 +1429,34 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[2:3], s[8:9]
 ; GCN-IR-NEXT:    s_sub_u32 s4, s2, s8
 ; GCN-IR-NEXT:    s_subb_u32 s5, s3, s8
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[4:5]
-; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[4:5]
+; GCN-IR-NEXT:    s_add_u32 s2, s14, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[2:3], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[2:3], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 63
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[8:9], s[10:11]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s8, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT:    s_add_i32 s8, s2, 1
+; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s2, 63, s2
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], 24, s2
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s8
-; GCN-IR-NEXT:    s_add_u32 s14, s4, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s5, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_add_u32 s12, s4, -1
+; GCN-IR-NEXT:    s_addc_u32 s13, s5, -1
+; GCN-IR-NEXT:    s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1497,19 +1464,19 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s6, s3, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[2:3], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s6, s14, s10
-; GCN-IR-NEXT:    s_subb_u32 s6, s15, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s6, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[8:9], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s6, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s6, s13, s11
+; GCN-IR-NEXT:    s_ashr_i32 s8, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s9, s8
+; GCN-IR-NEXT:    s_and_b32 s6, s8, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow6
diff --git a/llvm/test/CodeGen/AMDGPU/uaddo.ll b/llvm/test/CodeGen/AMDGPU/uaddo.ll
index e1574dcd45462..acd17c43c0c76 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddo.ll
@@ -14,15 +14,13 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    s_add_u32 s0, s2, s8
-; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    s_add_i32 s2, s2, s8
+; SI-NEXT:    s_addc_u32 s3, s3, s9
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    s_addc_u32 s1, s3, s9
+; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -33,15 +31,13 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_add_u32 s0, s2, s4
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_add_i32 s2, s2, s4
+; VI-NEXT:    s_addc_u32 s3, s3, s5
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    s_addc_u32 s1, s3, s5
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s2, v2
 ; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
@@ -52,13 +48,11 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    s_add_u32 s4, s2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    s_add_i32 s4, s2, s6
 ; GFX9-NEXT:    s_addc_u32 s5, s3, s7
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s4, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -71,12 +65,12 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX10-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_add_u32 s4, s2, s6
-; GFX10-NEXT:    s_addc_u32 s5, s3, s7
-; GFX10-NEXT:    v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT:    s_add_i32 s2, s2, s6
+; GFX10-NEXT:    s_addc_u32 s3, s3, s7
+; GFX10-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX10-NEXT:    s_endpgm
 ;
@@ -87,14 +81,14 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX11-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_add_u32 s4, s2, s4
-; GFX11-NEXT:    s_addc_u32 s5, s3, s5
+; GFX11-NEXT:    s_add_i32 s2, s2, s4
+; GFX11-NEXT:    s_addc_u32 s3, s3, s5
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX11-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s3, 0, s2
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    s_endpgm
   %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
@@ -436,21 +430,20 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_add_u32 s6, s4, s6
-; SI-NEXT:    v_mov_b32_e32 v0, s4
-; SI-NEXT:    s_addc_u32 s7, s5, s7
-; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    s_add_i32 s4, s4, s6
+; SI-NEXT:    s_addc_u32 s5, s5, s7
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; SI-NEXT:    s_mov_b32 s0, s2
 ; SI-NEXT:    s_mov_b32 s1, s3
 ; SI-NEXT:    s_mov_b32 s2, s10
 ; SI-NEXT:    s_mov_b32 s3, s11
-; SI-NEXT:    v_mov_b32_e32 v3, s7
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -459,36 +452,32 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_add_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    s_add_i32 s0, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    s_addc_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: s_uaddo_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_add_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
+; GFX9-NEXT:    s_add_i32 s0, s12, s14
 ; GFX9-NEXT:    s_addc_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: s_uaddo_i64:
@@ -496,11 +485,11 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_add_u32 s0, s12, s14
+; GFX10-NEXT:    s_add_i32 s0, s12, s14
 ; GFX10-NEXT:    s_addc_u32 s1, s13, s15
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
-; GFX10-NEXT:    v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX10-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -510,12 +499,11 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_add_u32 s6, s4, s6
-; GFX11-NEXT:    s_addc_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_add_i32 s4, s4, s6
+; GFX11-NEXT:    s_addc_u32 s5, s5, s7
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -551,10 +539,10 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s4, s2
 ; SI-NEXT:    s_mov_b32 s5, s3
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v2, vcc, v0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -574,10 +562,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI-NEXT:    v_mov_b32_e32 v6, s2
 ; VI-NEXT:    v_mov_b32_e32 v7, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v2, vcc, v0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; VI-NEXT:    flat_store_byte v[6:7], v0
 ; VI-NEXT:    s_endpgm
@@ -590,10 +577,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX9-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
 ; GFX9-NEXT:    s_endpgm
@@ -607,12 +593,11 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT:    global_store_byte v4, v2, s[10:11]
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_uaddo_i64:
@@ -624,14 +609,12 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11-NEXT:    global_load_b64 v[0:1], v4, s[4:5]
 ; GFX11-NEXT:    global_load_b64 v[2:3], v4, s[6:7]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT:    global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT:    global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT:    global_store_b8 v4, v2, s[2:3]
 ; GFX11-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 6efce53aaa5fe..5d38039e1daf4 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -145,9 +145,9 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
@@ -157,9 +157,9 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_add_u32 s14, s6, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s2, s2, s16
-; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_add_u32 s10, s2, s16
+; GCN-IR-NEXT:    s_addc_u32 s11, s3, 0
+; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -167,19 +167,19 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
-; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s11, s10
-; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s10
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
-; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[2:3], 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[2:3], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s2, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s2, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s2, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_and_b32 s4, s2, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[2:3], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s17
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[2:3], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -786,12 +786,11 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-LABEL: s_test_udiv_k_num_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GCN-NEXT:    s_sub_u32 s6, 0, s2
-; GCN-NEXT:    s_subb_u32 s8, 0, s3
+; GCN-NEXT:    s_sub_u32 s4, 0, s2
+; GCN-NEXT:    s_subb_u32 s5, 0, s3
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -800,75 +799,69 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s6, v0
-; GCN-NEXT:    v_readfirstlane_b32 s9, v1
-; GCN-NEXT:    v_readfirstlane_b32 s4, v0
-; GCN-NEXT:    s_mul_i32 s5, s6, s9
-; GCN-NEXT:    v_readfirstlane_b32 s12, v2
-; GCN-NEXT:    s_mul_i32 s10, s8, s4
-; GCN-NEXT:    s_mul_i32 s11, s6, s4
-; GCN-NEXT:    s_add_i32 s5, s12, s5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s11
-; GCN-NEXT:    s_add_i32 s5, s5, s10
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s5
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s11
-; GCN-NEXT:    v_readfirstlane_b32 s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s5
-; GCN-NEXT:    s_mul_i32 s13, s4, s5
-; GCN-NEXT:    s_add_u32 s10, s10, s13
-; GCN-NEXT:    v_readfirstlane_b32 s13, v0
-; GCN-NEXT:    s_mul_i32 s11, s9, s11
-; GCN-NEXT:    s_addc_u32 s13, 0, s13
-; GCN-NEXT:    v_readfirstlane_b32 s12, v4
-; GCN-NEXT:    s_add_u32 s10, s10, s11
-; GCN-NEXT:    v_readfirstlane_b32 s14, v1
-; GCN-NEXT:    s_addc_u32 s10, s13, s12
-; GCN-NEXT:    s_addc_u32 s11, s14, 0
-; GCN-NEXT:    s_mul_i32 s5, s9, s5
-; GCN-NEXT:    s_add_u32 s5, s10, s5
-; GCN-NEXT:    s_addc_u32 s10, 0, s11
-; GCN-NEXT:    s_add_i32 s11, s4, s5
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    v_mul_hi_u32 v0, s6, v0
-; GCN-NEXT:    s_or_b32 s4, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_addc_u32 s9, s9, s10
-; GCN-NEXT:    s_mul_i32 s4, s6, s9
-; GCN-NEXT:    v_readfirstlane_b32 s5, v0
-; GCN-NEXT:    s_add_i32 s4, s5, s4
-; GCN-NEXT:    s_mul_i32 s8, s8, s11
-; GCN-NEXT:    s_mul_i32 s5, s6, s11
-; GCN-NEXT:    s_add_i32 s4, s4, s8
-; GCN-NEXT:    v_mov_b32_e32 v2, s5
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mul_hi_u32 v3, s9, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s9, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    s_mul_i32 s8, s11, s4
-; GCN-NEXT:    v_readfirstlane_b32 s12, v2
-; GCN-NEXT:    s_add_u32 s8, s12, s8
-; GCN-NEXT:    v_readfirstlane_b32 s10, v0
-; GCN-NEXT:    s_mul_i32 s5, s9, s5
-; GCN-NEXT:    s_addc_u32 s10, 0, s10
-; GCN-NEXT:    v_readfirstlane_b32 s6, v3
-; GCN-NEXT:    s_add_u32 s5, s8, s5
-; GCN-NEXT:    s_addc_u32 s5, s10, s6
+; GCN-NEXT:    v_mul_hi_u32 v2, s4, v0
 ; GCN-NEXT:    v_readfirstlane_b32 s6, v1
-; GCN-NEXT:    s_addc_u32 s6, s6, 0
-; GCN-NEXT:    s_mul_i32 s4, s9, s4
-; GCN-NEXT:    s_add_u32 s4, s5, s4
-; GCN-NEXT:    s_addc_u32 s6, 0, s6
-; GCN-NEXT:    s_add_i32 s11, s11, s4
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_or_b32 s4, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_addc_u32 s4, s9, s6
-; GCN-NEXT:    v_mul_hi_u32 v1, s11, 24
+; GCN-NEXT:    v_readfirstlane_b32 s7, v0
+; GCN-NEXT:    s_mul_i32 s8, s4, s6
+; GCN-NEXT:    v_readfirstlane_b32 s11, v2
+; GCN-NEXT:    s_mul_i32 s9, s5, s7
+; GCN-NEXT:    s_mul_i32 s10, s4, s7
+; GCN-NEXT:    s_add_i32 s8, s11, s8
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s10
+; GCN-NEXT:    s_add_i32 s8, s8, s9
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s10
+; GCN-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NEXT:    s_mul_i32 s12, s7, s8
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s8
+; GCN-NEXT:    s_add_u32 s9, s9, s12
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_mul_i32 s10, s6, s10
+; GCN-NEXT:    s_addc_u32 s12, 0, s12
+; GCN-NEXT:    v_readfirstlane_b32 s11, v4
+; GCN-NEXT:    s_add_u32 s9, s9, s10
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    s_addc_u32 s9, s12, s11
+; GCN-NEXT:    s_mul_i32 s8, s6, s8
+; GCN-NEXT:    s_addc_u32 s10, s13, 0
+; GCN-NEXT:    s_add_u32 s8, s9, s8
+; GCN-NEXT:    s_addc_u32 s9, 0, s10
+; GCN-NEXT:    s_add_i32 s8, s7, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GCN-NEXT:    s_addc_u32 s6, s6, s9
+; GCN-NEXT:    s_mul_i32 s9, s4, s6
+; GCN-NEXT:    s_mul_i32 s5, s5, s8
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_add_i32 s9, s10, s9
+; GCN-NEXT:    s_mul_i32 s4, s4, s8
+; GCN-NEXT:    s_add_i32 s5, s9, s5
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s8, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GCN-NEXT:    s_mul_i32 s10, s8, s5
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_add_u32 s10, s12, s10
+; GCN-NEXT:    v_readfirstlane_b32 s11, v0
+; GCN-NEXT:    s_mul_i32 s4, s6, s4
+; GCN-NEXT:    s_addc_u32 s11, 0, s11
+; GCN-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NEXT:    s_add_u32 s4, s10, s4
+; GCN-NEXT:    s_addc_u32 s4, s11, s9
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_mul_i32 s5, s6, s5
+; GCN-NEXT:    s_add_u32 s4, s4, s5
+; GCN-NEXT:    s_addc_u32 s5, 0, s9
+; GCN-NEXT:    s_add_i32 s8, s8, s4
+; GCN-NEXT:    s_addc_u32 s4, s6, s5
+; GCN-NEXT:    v_mul_hi_u32 v1, s8, 24
 ; GCN-NEXT:    v_mul_hi_u32 v0, s4, 24
 ; GCN-NEXT:    s_mul_i32 s4, s4, 24
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    v_readfirstlane_b32 s8, v1
 ; GCN-NEXT:    v_readfirstlane_b32 s5, v0
 ; GCN-NEXT:    s_add_u32 s4, s8, s4
@@ -884,40 +877,37 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    s_mul_i32 s0, s2, s8
 ; GCN-NEXT:    s_sub_i32 s11, 24, s0
 ; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    s_or_b32 s12, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s12, 0
 ; GCN-NEXT:    s_subb_u32 s10, s10, s3
-; GCN-NEXT:    s_sub_i32 s13, s11, s2
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_sub_i32 s12, s11, s2
+; GCN-NEXT:    s_subb_u32 s10, s10, 0
+; GCN-NEXT:    s_cmp_ge_u32 s10, s3
+; GCN-NEXT:    s_cselect_b32 s13, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s12, s2
+; GCN-NEXT:    s_cselect_b32 s12, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s10, s3
+; GCN-NEXT:    s_cselect_b32 s10, s12, s13
+; GCN-NEXT:    s_add_u32 s12, s8, 1
+; GCN-NEXT:    s_addc_u32 s13, 0, 0
+; GCN-NEXT:    s_add_u32 s14, s8, 2
+; GCN-NEXT:    s_addc_u32 s15, 0, 0
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_cselect_b32 s10, s14, s12
+; GCN-NEXT:    s_cselect_b32 s12, s15, s13
 ; GCN-NEXT:    s_or_b32 s0, s0, s1
 ; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_subb_u32 s0, s10, 0
+; GCN-NEXT:    s_subb_u32 s0, 0, s9
 ; GCN-NEXT:    s_cmp_ge_u32 s0, s3
 ; GCN-NEXT:    s_cselect_b32 s1, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s13, s2
-; GCN-NEXT:    s_cselect_b32 s10, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s0, s3
-; GCN-NEXT:    s_cselect_b32 s0, s10, s1
-; GCN-NEXT:    s_add_u32 s1, s8, 1
-; GCN-NEXT:    s_addc_u32 s10, 0, 0
-; GCN-NEXT:    s_add_u32 s13, s8, 2
-; GCN-NEXT:    s_addc_u32 s14, 0, 0
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_cselect_b32 s0, s13, s1
-; GCN-NEXT:    s_cselect_b32 s1, s14, s10
-; GCN-NEXT:    s_cmp_lg_u32 s12, 0
-; GCN-NEXT:    s_subb_u32 s9, 0, s9
-; GCN-NEXT:    s_cmp_ge_u32 s9, s3
-; GCN-NEXT:    s_cselect_b32 s10, -1, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s11, s2
 ; GCN-NEXT:    s_cselect_b32 s2, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s9, s3
-; GCN-NEXT:    s_cselect_b32 s2, s2, s10
-; GCN-NEXT:    s_cmp_lg_u32 s2, 0
-; GCN-NEXT:    s_cselect_b32 s1, s1, 0
-; GCN-NEXT:    s_cselect_b32 s0, s0, s8
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mov_b32_e32 v1, s1
+; GCN-NEXT:    s_cmp_eq_u32 s0, s3
+; GCN-NEXT:    s_cselect_b32 s0, s2, s1
+; GCN-NEXT:    s_cmp_lg_u32 s0, 0
+; GCN-NEXT:    s_cselect_b32 s0, s12, 0
+; GCN-NEXT:    s_cselect_b32 s1, s10, s8
+; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mov_b32_e32 v1, s0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-NEXT:    s_endpgm
 ;
@@ -926,34 +916,34 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT:    s_add_u32 s8, s14, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_i32 s10, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_add_u32 s12, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -961,19 +951,19 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s9, s8
+; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_3
 ; GCN-IR-NEXT:  .LBB8_4: ; %Flow6
@@ -1291,52 +1281,52 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s10
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_i32 s11, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc4
-; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], s11
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 0xffffffc4
+; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, 23, s8
-; GCN-IR-NEXT:    s_subb_u32 s4, 0, s9
-; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
-; GCN-IR-NEXT:    s_and_b32 s10, s10, 24
-; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
-; GCN-IR-NEXT:    s_subb_u32 s9, s9, 0
-; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[2:3], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s2
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s3
+; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
+; GCN-IR-NEXT:    s_and_b32 s8, s8, 24
+; GCN-IR-NEXT:    s_sub_u32 s2, s2, s8
+; GCN-IR-NEXT:    s_subb_u32 s3, s3, 0
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_3
 ; GCN-IR-NEXT:  .LBB11_4: ; %Flow6
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 05c003eefa850..aaf355c7dc407 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -8,12 +8,11 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-NEXT:    s_load_dwordx2 s[8:9], s[4:5], 0xd
 ; GCN-NEXT:    s_load_dwordx4 s[4:7], s[4:5], 0x9
 ; GCN-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
-; GCN-NEXT:    s_sub_u32 s10, 0, s8
-; GCN-NEXT:    s_subb_u32 s11, 0, s9
+; GCN-NEXT:    s_sub_u32 s0, 0, s8
+; GCN-NEXT:    s_subb_u32 s1, 0, s9
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -22,71 +21,65 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s10, v0
-; GCN-NEXT:    v_readfirstlane_b32 s12, v1
-; GCN-NEXT:    v_readfirstlane_b32 s0, v0
-; GCN-NEXT:    s_mul_i32 s1, s10, s12
-; GCN-NEXT:    v_readfirstlane_b32 s15, v2
-; GCN-NEXT:    s_mul_i32 s13, s11, s0
-; GCN-NEXT:    s_mul_i32 s14, s10, s0
-; GCN-NEXT:    s_add_i32 s1, s15, s1
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s14
-; GCN-NEXT:    s_add_i32 s1, s1, s13
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s1
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s14
-; GCN-NEXT:    v_readfirstlane_b32 s13, v3
-; GCN-NEXT:    s_mul_i32 s15, s0, s1
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s1
-; GCN-NEXT:    s_add_u32 s13, s13, s15
+; GCN-NEXT:    v_mul_hi_u32 v2, s0, v0
+; GCN-NEXT:    v_readfirstlane_b32 s10, v1
+; GCN-NEXT:    v_readfirstlane_b32 s2, v0
+; GCN-NEXT:    s_mul_i32 s11, s0, s10
+; GCN-NEXT:    v_readfirstlane_b32 s14, v2
+; GCN-NEXT:    s_mul_i32 s12, s1, s2
+; GCN-NEXT:    s_mul_i32 s13, s0, s2
+; GCN-NEXT:    s_add_i32 s11, s14, s11
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s13
+; GCN-NEXT:    s_add_i32 s11, s11, s12
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s11
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s13
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_mul_i32 s15, s2, s11
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s11
+; GCN-NEXT:    s_add_u32 s12, s12, s15
 ; GCN-NEXT:    v_readfirstlane_b32 s15, v0
-; GCN-NEXT:    s_mul_i32 s14, s12, s14
+; GCN-NEXT:    s_mul_i32 s13, s10, s13
 ; GCN-NEXT:    s_addc_u32 s15, 0, s15
-; GCN-NEXT:    v_readfirstlane_b32 s16, v4
-; GCN-NEXT:    s_add_u32 s13, s13, s14
-; GCN-NEXT:    s_addc_u32 s13, s15, s16
-; GCN-NEXT:    v_readfirstlane_b32 s14, v1
-; GCN-NEXT:    s_addc_u32 s14, s14, 0
-; GCN-NEXT:    s_mul_i32 s1, s12, s1
-; GCN-NEXT:    s_add_u32 s1, s13, s1
-; GCN-NEXT:    s_addc_u32 s13, 0, s14
-; GCN-NEXT:    s_add_i32 s14, s0, s1
-; GCN-NEXT:    v_mov_b32_e32 v0, s14
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    v_mul_hi_u32 v0, s10, v0
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_addc_u32 s12, s12, s13
-; GCN-NEXT:    s_mul_i32 s0, s10, s12
-; GCN-NEXT:    v_readfirstlane_b32 s1, v0
-; GCN-NEXT:    s_add_i32 s0, s1, s0
-; GCN-NEXT:    s_mul_i32 s11, s11, s14
-; GCN-NEXT:    s_mul_i32 s1, s10, s14
-; GCN-NEXT:    s_add_i32 s0, s0, s11
-; GCN-NEXT:    v_mov_b32_e32 v2, s1
-; GCN-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-NEXT:    v_mul_hi_u32 v3, s12, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s14, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s12, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s14, v0
-; GCN-NEXT:    s_mul_i32 s11, s14, s0
-; GCN-NEXT:    v_readfirstlane_b32 s15, v2
-; GCN-NEXT:    s_add_u32 s11, s15, s11
+; GCN-NEXT:    v_readfirstlane_b32 s14, v4
+; GCN-NEXT:    s_add_u32 s12, s12, s13
+; GCN-NEXT:    s_addc_u32 s12, s15, s14
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-NEXT:    s_mul_i32 s11, s10, s11
+; GCN-NEXT:    s_add_u32 s11, s12, s11
+; GCN-NEXT:    s_addc_u32 s12, 0, s13
+; GCN-NEXT:    s_add_i32 s11, s2, s11
+; GCN-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-NEXT:    v_mul_hi_u32 v0, s0, v0
+; GCN-NEXT:    s_addc_u32 s10, s10, s12
+; GCN-NEXT:    s_mul_i32 s12, s0, s10
+; GCN-NEXT:    s_mul_i32 s1, s1, s11
 ; GCN-NEXT:    v_readfirstlane_b32 s13, v0
-; GCN-NEXT:    s_mul_i32 s1, s12, s1
-; GCN-NEXT:    s_addc_u32 s13, 0, s13
-; GCN-NEXT:    v_readfirstlane_b32 s10, v3
-; GCN-NEXT:    s_add_u32 s1, s11, s1
-; GCN-NEXT:    s_addc_u32 s1, s13, s10
-; GCN-NEXT:    v_readfirstlane_b32 s10, v1
-; GCN-NEXT:    s_addc_u32 s10, s10, 0
-; GCN-NEXT:    s_mul_i32 s0, s12, s0
-; GCN-NEXT:    s_add_u32 s0, s1, s0
-; GCN-NEXT:    s_addc_u32 s10, 0, s10
-; GCN-NEXT:    s_add_i32 s11, s14, s0
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_addc_u32 s1, s12, s10
+; GCN-NEXT:    s_add_i32 s12, s13, s12
+; GCN-NEXT:    s_mul_i32 s0, s0, s11
+; GCN-NEXT:    s_add_i32 s1, s12, s1
+; GCN-NEXT:    v_mov_b32_e32 v2, s0
+; GCN-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-NEXT:    v_mul_hi_u32 v3, s10, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s10, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
+; GCN-NEXT:    s_mul_i32 s13, s11, s1
+; GCN-NEXT:    v_readfirstlane_b32 s15, v2
+; GCN-NEXT:    s_add_u32 s13, s15, s13
+; GCN-NEXT:    v_readfirstlane_b32 s14, v0
+; GCN-NEXT:    s_mul_i32 s0, s10, s0
+; GCN-NEXT:    s_addc_u32 s14, 0, s14
+; GCN-NEXT:    v_readfirstlane_b32 s12, v3
+; GCN-NEXT:    s_add_u32 s0, s13, s0
+; GCN-NEXT:    s_addc_u32 s0, s14, s12
+; GCN-NEXT:    v_readfirstlane_b32 s12, v1
+; GCN-NEXT:    s_addc_u32 s12, s12, 0
+; GCN-NEXT:    s_mul_i32 s1, s10, s1
+; GCN-NEXT:    s_add_u32 s0, s0, s1
+; GCN-NEXT:    s_addc_u32 s1, 0, s12
+; GCN-NEXT:    s_add_i32 s11, s11, s0
+; GCN-NEXT:    s_addc_u32 s1, s10, s1
 ; GCN-NEXT:    v_mov_b32_e32 v0, s1
 ; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
 ; GCN-NEXT:    v_mov_b32_e32 v2, s11
@@ -115,46 +108,42 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-NEXT:    v_readfirstlane_b32 s10, v0
 ; GCN-NEXT:    s_add_i32 s5, s10, s5
 ; GCN-NEXT:    s_mul_i32 s10, s9, s4
-; GCN-NEXT:    s_add_i32 s10, s5, s10
-; GCN-NEXT:    s_sub_i32 s11, s7, s10
+; GCN-NEXT:    s_add_i32 s12, s5, s10
+; GCN-NEXT:    s_sub_i32 s10, s7, s12
 ; GCN-NEXT:    s_mul_i32 s4, s8, s4
 ; GCN-NEXT:    s_sub_i32 s6, s6, s4
 ; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_or_b32 s12, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s12, 0
-; GCN-NEXT:    s_subb_u32 s11, s11, s9
-; GCN-NEXT:    s_sub_i32 s13, s6, s8
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
+; GCN-NEXT:    s_subb_u32 s13, s10, s9
+; GCN-NEXT:    s_sub_i32 s14, s6, s8
+; GCN-NEXT:    s_cselect_b64 s[10:11], 1, 0
+; GCN-NEXT:    s_subb_u32 s15, s13, 0
+; GCN-NEXT:    s_cmp_ge_u32 s15, s9
+; GCN-NEXT:    s_cselect_b32 s16, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s14, s8
+; GCN-NEXT:    s_cselect_b32 s17, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s15, s9
+; GCN-NEXT:    s_cselect_b32 s16, s17, s16
+; GCN-NEXT:    s_or_b32 s10, s10, s11
+; GCN-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-NEXT:    s_subb_u32 s10, s13, s9
+; GCN-NEXT:    s_sub_i32 s11, s14, s8
+; GCN-NEXT:    s_subb_u32 s10, s10, 0
+; GCN-NEXT:    s_cmp_lg_u32 s16, 0
+; GCN-NEXT:    s_cselect_b32 s11, s11, s14
+; GCN-NEXT:    s_cselect_b32 s10, s10, s15
 ; GCN-NEXT:    s_or_b32 s4, s4, s5
 ; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_subb_u32 s14, s11, 0
-; GCN-NEXT:    s_cmp_ge_u32 s14, s9
+; GCN-NEXT:    s_subb_u32 s4, s7, s12
+; GCN-NEXT:    s_cmp_ge_u32 s4, s9
 ; GCN-NEXT:    s_cselect_b32 s5, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s13, s8
-; GCN-NEXT:    s_cselect_b32 s15, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s14, s9
-; GCN-NEXT:    s_cselect_b32 s15, s15, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_subb_u32 s11, s11, s9
-; GCN-NEXT:    s_sub_i32 s16, s13, s8
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_or_b32 s4, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_subb_u32 s4, s11, 0
-; GCN-NEXT:    s_cmp_lg_u32 s15, 0
-; GCN-NEXT:    s_cselect_b32 s5, s16, s13
-; GCN-NEXT:    s_cselect_b32 s4, s4, s14
-; GCN-NEXT:    s_cmp_lg_u32 s12, 0
-; GCN-NEXT:    s_subb_u32 s7, s7, s10
-; GCN-NEXT:    s_cmp_ge_u32 s7, s9
-; GCN-NEXT:    s_cselect_b32 s10, -1, 0
 ; GCN-NEXT:    s_cmp_ge_u32 s6, s8
-; GCN-NEXT:    s_cselect_b32 s8, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s7, s9
-; GCN-NEXT:    s_cselect_b32 s8, s8, s10
-; GCN-NEXT:    s_cmp_lg_u32 s8, 0
-; GCN-NEXT:    s_cselect_b32 s4, s4, s7
-; GCN-NEXT:    s_cselect_b32 s5, s5, s6
+; GCN-NEXT:    s_cselect_b32 s7, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s4, s9
+; GCN-NEXT:    s_cselect_b32 s5, s7, s5
+; GCN-NEXT:    s_cmp_lg_u32 s5, 0
+; GCN-NEXT:    s_cselect_b32 s4, s10, s4
+; GCN-NEXT:    s_cselect_b32 s5, s11, s6
+; GCN-NEXT:    s_mov_b32 s2, -1
 ; GCN-NEXT:    v_mov_b32_e32 v0, s5
 ; GCN-NEXT:    v_mov_b32_e32 v1, s4
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
@@ -170,35 +159,35 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT:    s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT:    s_flbit_i32_b64 s16, s[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
 ; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT:    s_add_u32 s16, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT:    s_add_u32 s14, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s4, s18
-; GCN-IR-NEXT:    s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s4, s16
+; GCN-IR-NEXT:    s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -206,19 +195,19 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT:    s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s4, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT:    s_add_i32 s16, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -804,12 +793,11 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-LABEL: s_test_urem_k_num_i64:
 ; GCN:       ; %bb.0:
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
-; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s3
-; GCN-NEXT:    s_sub_u32 s6, 0, s2
-; GCN-NEXT:    s_subb_u32 s8, 0, s3
+; GCN-NEXT:    s_sub_u32 s4, 0, s2
+; GCN-NEXT:    s_subb_u32 s5, 0, s3
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -818,75 +806,69 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v1, v1
-; GCN-NEXT:    v_mul_hi_u32 v2, s6, v0
-; GCN-NEXT:    v_readfirstlane_b32 s9, v1
-; GCN-NEXT:    v_readfirstlane_b32 s4, v0
-; GCN-NEXT:    s_mul_i32 s5, s6, s9
-; GCN-NEXT:    v_readfirstlane_b32 s12, v2
-; GCN-NEXT:    s_mul_i32 s10, s8, s4
-; GCN-NEXT:    s_mul_i32 s11, s6, s4
-; GCN-NEXT:    s_add_i32 s5, s12, s5
-; GCN-NEXT:    v_mul_hi_u32 v3, v0, s11
-; GCN-NEXT:    s_add_i32 s5, s5, s10
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, s5
-; GCN-NEXT:    v_mul_hi_u32 v4, v1, s11
-; GCN-NEXT:    v_readfirstlane_b32 s10, v3
-; GCN-NEXT:    v_mul_hi_u32 v1, v1, s5
-; GCN-NEXT:    s_mul_i32 s13, s4, s5
-; GCN-NEXT:    s_add_u32 s10, s10, s13
-; GCN-NEXT:    v_readfirstlane_b32 s13, v0
-; GCN-NEXT:    s_mul_i32 s11, s9, s11
-; GCN-NEXT:    s_addc_u32 s13, 0, s13
-; GCN-NEXT:    v_readfirstlane_b32 s12, v4
-; GCN-NEXT:    s_add_u32 s10, s10, s11
-; GCN-NEXT:    v_readfirstlane_b32 s14, v1
-; GCN-NEXT:    s_addc_u32 s10, s13, s12
-; GCN-NEXT:    s_addc_u32 s11, s14, 0
-; GCN-NEXT:    s_mul_i32 s5, s9, s5
-; GCN-NEXT:    s_add_u32 s5, s10, s5
-; GCN-NEXT:    s_addc_u32 s10, 0, s11
-; GCN-NEXT:    s_add_i32 s11, s4, s5
-; GCN-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    v_mul_hi_u32 v0, s6, v0
-; GCN-NEXT:    s_or_b32 s4, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_addc_u32 s9, s9, s10
-; GCN-NEXT:    s_mul_i32 s4, s6, s9
-; GCN-NEXT:    v_readfirstlane_b32 s5, v0
-; GCN-NEXT:    s_add_i32 s4, s5, s4
-; GCN-NEXT:    s_mul_i32 s8, s8, s11
-; GCN-NEXT:    s_mul_i32 s5, s6, s11
-; GCN-NEXT:    s_add_i32 s4, s4, s8
-; GCN-NEXT:    v_mov_b32_e32 v2, s5
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mul_hi_u32 v3, s9, v2
-; GCN-NEXT:    v_mul_hi_u32 v2, s11, v2
-; GCN-NEXT:    v_mul_hi_u32 v1, s9, v0
-; GCN-NEXT:    v_mul_hi_u32 v0, s11, v0
-; GCN-NEXT:    s_mul_i32 s8, s11, s4
-; GCN-NEXT:    v_readfirstlane_b32 s12, v2
-; GCN-NEXT:    s_add_u32 s8, s12, s8
-; GCN-NEXT:    v_readfirstlane_b32 s10, v0
-; GCN-NEXT:    s_mul_i32 s5, s9, s5
-; GCN-NEXT:    s_addc_u32 s10, 0, s10
-; GCN-NEXT:    v_readfirstlane_b32 s6, v3
-; GCN-NEXT:    s_add_u32 s5, s8, s5
-; GCN-NEXT:    s_addc_u32 s5, s10, s6
+; GCN-NEXT:    v_mul_hi_u32 v2, s4, v0
 ; GCN-NEXT:    v_readfirstlane_b32 s6, v1
-; GCN-NEXT:    s_addc_u32 s6, s6, 0
-; GCN-NEXT:    s_mul_i32 s4, s9, s4
-; GCN-NEXT:    s_add_u32 s4, s5, s4
-; GCN-NEXT:    s_addc_u32 s6, 0, s6
-; GCN-NEXT:    s_add_i32 s11, s11, s4
-; GCN-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GCN-NEXT:    s_or_b32 s4, s4, s5
-; GCN-NEXT:    s_cmp_lg_u32 s4, 0
-; GCN-NEXT:    s_addc_u32 s4, s9, s6
-; GCN-NEXT:    v_mul_hi_u32 v1, s11, 24
+; GCN-NEXT:    v_readfirstlane_b32 s7, v0
+; GCN-NEXT:    s_mul_i32 s8, s4, s6
+; GCN-NEXT:    v_readfirstlane_b32 s11, v2
+; GCN-NEXT:    s_mul_i32 s9, s5, s7
+; GCN-NEXT:    s_mul_i32 s10, s4, s7
+; GCN-NEXT:    s_add_i32 s8, s11, s8
+; GCN-NEXT:    v_mul_hi_u32 v3, v0, s10
+; GCN-NEXT:    s_add_i32 s8, s8, s9
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v4, v1, s10
+; GCN-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NEXT:    s_mul_i32 s12, s7, s8
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, s8
+; GCN-NEXT:    s_add_u32 s9, s9, s12
+; GCN-NEXT:    v_readfirstlane_b32 s12, v0
+; GCN-NEXT:    s_mul_i32 s10, s6, s10
+; GCN-NEXT:    s_addc_u32 s12, 0, s12
+; GCN-NEXT:    v_readfirstlane_b32 s11, v4
+; GCN-NEXT:    s_add_u32 s9, s9, s10
+; GCN-NEXT:    v_readfirstlane_b32 s13, v1
+; GCN-NEXT:    s_addc_u32 s9, s12, s11
+; GCN-NEXT:    s_mul_i32 s8, s6, s8
+; GCN-NEXT:    s_addc_u32 s10, s13, 0
+; GCN-NEXT:    s_add_u32 s8, s9, s8
+; GCN-NEXT:    s_addc_u32 s9, 0, s10
+; GCN-NEXT:    s_add_i32 s8, s7, s8
+; GCN-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-NEXT:    v_mul_hi_u32 v0, s4, v0
+; GCN-NEXT:    s_addc_u32 s6, s6, s9
+; GCN-NEXT:    s_mul_i32 s9, s4, s6
+; GCN-NEXT:    s_mul_i32 s5, s5, s8
+; GCN-NEXT:    v_readfirstlane_b32 s10, v0
+; GCN-NEXT:    s_add_i32 s9, s10, s9
+; GCN-NEXT:    s_mul_i32 s4, s4, s8
+; GCN-NEXT:    s_add_i32 s5, s9, s5
+; GCN-NEXT:    v_mov_b32_e32 v2, s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s5
+; GCN-NEXT:    v_mul_hi_u32 v3, s6, v2
+; GCN-NEXT:    v_mul_hi_u32 v2, s8, v2
+; GCN-NEXT:    v_mul_hi_u32 v1, s6, v0
+; GCN-NEXT:    v_mul_hi_u32 v0, s8, v0
+; GCN-NEXT:    s_mul_i32 s10, s8, s5
+; GCN-NEXT:    v_readfirstlane_b32 s12, v2
+; GCN-NEXT:    s_add_u32 s10, s12, s10
+; GCN-NEXT:    v_readfirstlane_b32 s11, v0
+; GCN-NEXT:    s_mul_i32 s4, s6, s4
+; GCN-NEXT:    s_addc_u32 s11, 0, s11
+; GCN-NEXT:    v_readfirstlane_b32 s9, v3
+; GCN-NEXT:    s_add_u32 s4, s10, s4
+; GCN-NEXT:    s_addc_u32 s4, s11, s9
+; GCN-NEXT:    v_readfirstlane_b32 s9, v1
+; GCN-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-NEXT:    s_mul_i32 s5, s6, s5
+; GCN-NEXT:    s_add_u32 s4, s4, s5
+; GCN-NEXT:    s_addc_u32 s5, 0, s9
+; GCN-NEXT:    s_add_i32 s8, s8, s4
+; GCN-NEXT:    s_addc_u32 s4, s6, s5
+; GCN-NEXT:    v_mul_hi_u32 v1, s8, 24
 ; GCN-NEXT:    v_mul_hi_u32 v0, s4, 24
 ; GCN-NEXT:    s_mul_i32 s4, s4, 24
-; GCN-NEXT:    s_mov_b32 s6, -1
+; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    v_readfirstlane_b32 s8, v1
 ; GCN-NEXT:    v_readfirstlane_b32 s5, v0
 ; GCN-NEXT:    s_add_u32 s4, s8, s4
@@ -897,46 +879,42 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-NEXT:    s_mov_b32 s5, s1
 ; GCN-NEXT:    s_mul_i32 s0, s3, s8
 ; GCN-NEXT:    v_readfirstlane_b32 s1, v0
-; GCN-NEXT:    s_add_i32 s9, s1, s0
-; GCN-NEXT:    s_sub_i32 s10, 0, s9
+; GCN-NEXT:    s_add_i32 s10, s1, s0
+; GCN-NEXT:    s_sub_i32 s9, 0, s10
 ; GCN-NEXT:    s_mul_i32 s0, s2, s8
-; GCN-NEXT:    s_sub_i32 s8, 24, s0
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    s_or_b32 s11, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s11, 0
-; GCN-NEXT:    s_subb_u32 s10, s10, s3
-; GCN-NEXT:    s_sub_i32 s12, s8, s2
+; GCN-NEXT:    s_sub_i32 s11, 24, s0
 ; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; GCN-NEXT:    s_subb_u32 s12, s9, s3
+; GCN-NEXT:    s_sub_i32 s13, s11, s2
+; GCN-NEXT:    s_cselect_b64 s[8:9], 1, 0
+; GCN-NEXT:    s_subb_u32 s14, s12, 0
+; GCN-NEXT:    s_cmp_ge_u32 s14, s3
+; GCN-NEXT:    s_cselect_b32 s15, -1, 0
+; GCN-NEXT:    s_cmp_ge_u32 s13, s2
+; GCN-NEXT:    s_cselect_b32 s16, -1, 0
+; GCN-NEXT:    s_cmp_eq_u32 s14, s3
+; GCN-NEXT:    s_cselect_b32 s15, s16, s15
+; GCN-NEXT:    s_or_b32 s8, s8, s9
+; GCN-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-NEXT:    s_subb_u32 s8, s12, s3
+; GCN-NEXT:    s_sub_i32 s9, s13, s2
+; GCN-NEXT:    s_subb_u32 s8, s8, 0
+; GCN-NEXT:    s_cmp_lg_u32 s15, 0
+; GCN-NEXT:    s_cselect_b32 s9, s9, s13
+; GCN-NEXT:    s_cselect_b32 s8, s8, s14
 ; GCN-NEXT:    s_or_b32 s0, s0, s1
 ; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_subb_u32 s13, s10, 0
-; GCN-NEXT:    s_cmp_ge_u32 s13, s3
+; GCN-NEXT:    s_subb_u32 s0, 0, s10
+; GCN-NEXT:    s_cmp_ge_u32 s0, s3
 ; GCN-NEXT:    s_cselect_b32 s1, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s12, s2
-; GCN-NEXT:    s_cselect_b32 s14, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s13, s3
-; GCN-NEXT:    s_cselect_b32 s14, s14, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_subb_u32 s10, s10, s3
-; GCN-NEXT:    s_sub_i32 s15, s12, s2
-; GCN-NEXT:    s_cselect_b64 s[0:1], 1, 0
-; GCN-NEXT:    s_or_b32 s0, s0, s1
-; GCN-NEXT:    s_cmp_lg_u32 s0, 0
-; GCN-NEXT:    s_subb_u32 s0, s10, 0
-; GCN-NEXT:    s_cmp_lg_u32 s14, 0
-; GCN-NEXT:    s_cselect_b32 s1, s15, s12
-; GCN-NEXT:    s_cselect_b32 s0, s0, s13
-; GCN-NEXT:    s_cmp_lg_u32 s11, 0
-; GCN-NEXT:    s_subb_u32 s9, 0, s9
-; GCN-NEXT:    s_cmp_ge_u32 s9, s3
-; GCN-NEXT:    s_cselect_b32 s10, -1, 0
-; GCN-NEXT:    s_cmp_ge_u32 s8, s2
+; GCN-NEXT:    s_cmp_ge_u32 s11, s2
 ; GCN-NEXT:    s_cselect_b32 s2, -1, 0
-; GCN-NEXT:    s_cmp_eq_u32 s9, s3
-; GCN-NEXT:    s_cselect_b32 s2, s2, s10
-; GCN-NEXT:    s_cmp_lg_u32 s2, 0
-; GCN-NEXT:    s_cselect_b32 s0, s0, s9
-; GCN-NEXT:    s_cselect_b32 s1, s1, s8
+; GCN-NEXT:    s_cmp_eq_u32 s0, s3
+; GCN-NEXT:    s_cselect_b32 s1, s2, s1
+; GCN-NEXT:    s_cmp_lg_u32 s1, 0
+; GCN-NEXT:    s_cselect_b32 s0, s8, s0
+; GCN-NEXT:    s_cselect_b32 s1, s9, s11
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_mov_b32_e32 v0, s1
 ; GCN-NEXT:    v_mov_b32_e32 v1, s0
 ; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
@@ -947,34 +925,34 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT:    s_add_u32 s8, s14, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_i32 s10, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_add_u32 s12, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB6_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -982,19 +960,19 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s9, s8
+; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_3
 ; GCN-IR-NEXT:  .LBB6_4: ; %Flow6
@@ -1063,52 +1041,52 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s10
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_i32 s11, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc4
-; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s11
+; GCN-IR-NEXT:    s_add_u32 s12, s10, 0xffffffc4
+; GCN-IR-NEXT:    s_addc_u32 s13, 0, -1
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, 23, s10
-; GCN-IR-NEXT:    s_subb_u32 s4, 0, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
-; GCN-IR-NEXT:    s_and_b32 s12, s12, 24
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, 0
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s8
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s9
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b32 s10, s10, 24
+; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
+; GCN-IR-NEXT:    s_subb_u32 s9, s9, 0
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[14:15], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_3
 ; GCN-IR-NEXT:  .LBB7_4: ; %Flow6
diff --git a/llvm/test/CodeGen/AMDGPU/usubo.ll b/llvm/test/CodeGen/AMDGPU/usubo.ll
index 0289dab4588a2..12713549e83f9 100644
--- a/llvm/test/CodeGen/AMDGPU/usubo.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubo.ll
@@ -14,15 +14,13 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    s_sub_u32 s0, s2, s8
-; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    s_sub_i32 s2, s2, s8
+; SI-NEXT:    s_subb_u32 s3, s3, s9
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    s_subb_u32 s1, s3, s9
+; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -33,15 +31,13 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_sub_u32 s0, s2, s4
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_sub_i32 s2, s2, s4
+; VI-NEXT:    s_subb_u32 s3, s3, s5
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    s_subb_u32 s1, s3, s5
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s2, v2
 ; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
@@ -52,13 +48,11 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    s_sub_u32 s4, s2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    s_sub_i32 s4, s2, s6
 ; GFX9-NEXT:    s_subb_u32 s5, s3, s7
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s4, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
@@ -71,12 +65,12 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX10-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_sub_u32 s4, s2, s6
-; GFX10-NEXT:    s_subb_u32 s5, s3, s7
-; GFX10-NEXT:    v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT:    s_sub_i32 s2, s2, s6
+; GFX10-NEXT:    s_subb_u32 s3, s3, s7
+; GFX10-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX10-NEXT:    s_endpgm
 ;
@@ -87,14 +81,14 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX11-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_sub_u32 s4, s2, s4
-; GFX11-NEXT:    s_subb_u32 s5, s3, s5
+; GFX11-NEXT:    s_sub_i32 s2, s2, s4
+; GFX11-NEXT:    s_subb_u32 s3, s3, s5
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX11-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s3, 0, s2
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    s_endpgm
   %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
@@ -435,21 +429,20 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_sub_u32 s6, s4, s6
-; SI-NEXT:    v_mov_b32_e32 v0, s4
-; SI-NEXT:    s_subb_u32 s7, s5, s7
-; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    s_sub_i32 s4, s4, s6
+; SI-NEXT:    s_subb_u32 s5, s5, s7
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; SI-NEXT:    s_mov_b32 s0, s2
 ; SI-NEXT:    s_mov_b32 s1, s3
 ; SI-NEXT:    s_mov_b32 s2, s10
 ; SI-NEXT:    s_mov_b32 s3, s11
-; SI-NEXT:    v_mov_b32_e32 v3, s7
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -458,36 +451,32 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_sub_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
+; VI-NEXT:    s_sub_i32 s0, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
 ; VI-NEXT:    s_subb_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v2, s2
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: s_usubo_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_sub_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
+; GFX9-NEXT:    s_sub_i32 s0, s12, s14
 ; GFX9-NEXT:    s_subb_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    v_mov_b32_e32 v0, s0
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: s_usubo_i64:
@@ -495,11 +484,11 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_sub_u32 s0, s12, s14
+; GFX10-NEXT:    s_sub_i32 s0, s12, s14
 ; GFX10-NEXT:    s_subb_u32 s1, s13, s15
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
-; GFX10-NEXT:    v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX10-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -509,12 +498,11 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_sub_u32 s6, s4, s6
-; GFX11-NEXT:    s_subb_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_sub_i32 s4, s4, s6
+; GFX11-NEXT:    s_subb_u32 s5, s5, s7
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -550,10 +538,10 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s4, s2
 ; SI-NEXT:    s_mov_b32 s5, s3
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_sub_i32_e32 v2, vcc, v0, v2
-; SI-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; SI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -573,10 +561,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI-NEXT:    v_mov_b32_e32 v6, s2
 ; VI-NEXT:    v_mov_b32_e32 v7, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_sub_u32_e32 v2, vcc, v0, v2
-; VI-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
+; VI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; VI-NEXT:    flat_store_byte v[6:7], v0
 ; VI-NEXT:    s_endpgm
@@ -589,10 +576,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX9-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
 ; GFX9-NEXT:    s_endpgm
@@ -606,12 +592,11 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT:    global_store_byte v4, v2, s[10:11]
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_usubo_i64:
@@ -623,14 +608,12 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11-NEXT:    global_load_b64 v[0:1], v4, s[4:5]
 ; GFX11-NEXT:    global_load_b64 v[2:3], v4, s[6:7]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT:    global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT:    global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT:    global_store_b8 v4, v2, s[2:3]
 ; GFX11-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 89f719da21ebf..c64d7e635edca 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -774,44 +774,40 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
 ; GFX1032-NEXT:    s_add_u32 s11, s12, s11
 ; GFX1032-NEXT:    s_addc_u32 s12, 0, s13
 ; GFX1032-NEXT:    s_add_i32 s8, s8, s11
-; GFX1032-NEXT:    s_cselect_b32 s11, 1, 0
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s9, s8
-; GFX1032-NEXT:    s_cmp_lg_u32 s11, 0
-; GFX1032-NEXT:    s_mul_i32 s11, s9, s8
 ; GFX1032-NEXT:    s_addc_u32 s5, s5, s12
-; GFX1032-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1032-NEXT:    s_mul_hi_u32 s11, s9, s8
+; GFX1032-NEXT:    s_mul_i32 s12, s9, s8
 ; GFX1032-NEXT:    s_mul_i32 s9, s9, s5
-; GFX1032-NEXT:    s_mul_hi_u32 s12, s8, s11
-; GFX1032-NEXT:    s_add_i32 s9, s13, s9
-; GFX1032-NEXT:    s_mul_hi_u32 s13, s5, s11
+; GFX1032-NEXT:    s_mul_i32 s10, s10, s8
+; GFX1032-NEXT:    s_add_i32 s9, s11, s9
+; GFX1032-NEXT:    s_mul_i32 s11, s5, s12
 ; GFX1032-NEXT:    s_add_i32 s9, s9, s10
-; GFX1032-NEXT:    s_mul_i32 s10, s5, s11
+; GFX1032-NEXT:    s_mul_hi_u32 s10, s8, s12
 ; GFX1032-NEXT:    s_mul_i32 s15, s8, s9
 ; GFX1032-NEXT:    s_mul_hi_u32 s14, s8, s9
-; GFX1032-NEXT:    s_add_u32 s12, s12, s15
+; GFX1032-NEXT:    s_add_u32 s10, s10, s15
+; GFX1032-NEXT:    s_mul_hi_u32 s13, s5, s12
 ; GFX1032-NEXT:    s_addc_u32 s14, 0, s14
-; GFX1032-NEXT:    s_mul_hi_u32 s11, s5, s9
-; GFX1032-NEXT:    s_add_u32 s10, s12, s10
+; GFX1032-NEXT:    s_mul_hi_u32 s12, s5, s9
+; GFX1032-NEXT:    s_add_u32 s10, s10, s11
 ; GFX1032-NEXT:    s_mul_i32 s9, s5, s9
 ; GFX1032-NEXT:    s_addc_u32 s10, s14, s13
-; GFX1032-NEXT:    s_addc_u32 s11, s11, 0
+; GFX1032-NEXT:    s_addc_u32 s11, s12, 0
 ; GFX1032-NEXT:    s_add_u32 s9, s10, s9
 ; GFX1032-NEXT:    s_addc_u32 s10, 0, s11
 ; GFX1032-NEXT:    s_add_i32 s8, s8, s9
-; GFX1032-NEXT:    s_cselect_b32 s9, 1, 0
-; GFX1032-NEXT:    s_mul_hi_u32 s11, s2, s8
-; GFX1032-NEXT:    s_cmp_lg_u32 s9, 0
-; GFX1032-NEXT:    s_mul_hi_u32 s9, s3, s8
 ; GFX1032-NEXT:    s_addc_u32 s5, s5, s10
-; GFX1032-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1032-NEXT:    s_mul_hi_u32 s9, s2, s8
 ; GFX1032-NEXT:    s_mul_i32 s12, s2, s5
-; GFX1032-NEXT:    s_mul_hi_u32 s10, s2, s5
-; GFX1032-NEXT:    s_add_u32 s11, s11, s12
-; GFX1032-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1032-NEXT:    s_mul_hi_u32 s11, s2, s5
+; GFX1032-NEXT:    s_mul_hi_u32 s10, s3, s8
+; GFX1032-NEXT:    s_mul_i32 s8, s3, s8
+; GFX1032-NEXT:    s_add_u32 s9, s9, s12
+; GFX1032-NEXT:    s_addc_u32 s11, 0, s11
 ; GFX1032-NEXT:    s_mul_hi_u32 s13, s3, s5
-; GFX1032-NEXT:    s_add_u32 s8, s11, s8
+; GFX1032-NEXT:    s_add_u32 s8, s9, s8
 ; GFX1032-NEXT:    s_mul_i32 s5, s3, s5
-; GFX1032-NEXT:    s_addc_u32 s8, s10, s9
+; GFX1032-NEXT:    s_addc_u32 s8, s11, s10
 ; GFX1032-NEXT:    s_addc_u32 s9, s13, 0
 ; GFX1032-NEXT:    s_add_u32 s5, s8, s5
 ; GFX1032-NEXT:    s_addc_u32 s8, 0, s9
@@ -824,11 +820,8 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
 ; GFX1032-NEXT:    s_sub_i32 s11, s3, s9
 ; GFX1032-NEXT:    s_sub_i32 s10, s2, s10
 ; GFX1032-NEXT:    s_cselect_b32 s12, 1, 0
-; GFX1032-NEXT:    s_cmp_lg_u32 s12, 0
 ; GFX1032-NEXT:    s_subb_u32 s11, s11, s1
 ; GFX1032-NEXT:    s_sub_i32 s13, s10, s0
-; GFX1032-NEXT:    s_cselect_b32 s14, 1, 0
-; GFX1032-NEXT:    s_cmp_lg_u32 s14, 0
 ; GFX1032-NEXT:    s_subb_u32 s11, s11, 0
 ; GFX1032-NEXT:    s_cmp_ge_u32 s11, s1
 ; GFX1032-NEXT:    s_cselect_b32 s14, -1, 0
@@ -901,8 +894,8 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
 ; GFX1064-NEXT:  ; %bb.1:
 ; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s0
 ; GFX1064-NEXT:    v_cvt_f32_u32_e32 v1, s1
-; GFX1064-NEXT:    s_sub_u32 s9, 0, s0
-; GFX1064-NEXT:    s_subb_u32 s10, 0, s1
+; GFX1064-NEXT:    s_sub_u32 s8, 0, s0
+; GFX1064-NEXT:    s_subb_u32 s9, 0, s1
 ; GFX1064-NEXT:    v_madmk_f32 v0, v1, 0x4f800000, v0
 ; GFX1064-NEXT:    v_rcp_f32_e32 v0, v0
 ; GFX1064-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -911,109 +904,102 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
 ; GFX1064-NEXT:    v_madmk_f32 v0, v1, 0xcf800000, v0
 ; GFX1064-NEXT:    v_cvt_u32_f32_e32 v1, v1
 ; GFX1064-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX1064-NEXT:    v_readfirstlane_b32 s8, v1
-; GFX1064-NEXT:    v_readfirstlane_b32 s4, v0
-; GFX1064-NEXT:    s_mul_i32 s5, s9, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s9, s4
-; GFX1064-NEXT:    s_mul_i32 s11, s10, s4
-; GFX1064-NEXT:    s_add_i32 s5, s12, s5
-; GFX1064-NEXT:    s_mul_i32 s13, s9, s4
-; GFX1064-NEXT:    s_add_i32 s5, s5, s11
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s4, s13
-; GFX1064-NEXT:    s_mul_i32 s15, s4, s5
-; GFX1064-NEXT:    s_mul_hi_u32 s14, s8, s13
-; GFX1064-NEXT:    s_mul_i32 s11, s8, s13
-; GFX1064-NEXT:    s_mul_hi_u32 s13, s4, s5
+; GFX1064-NEXT:    v_readfirstlane_b32 s4, v1
+; GFX1064-NEXT:    v_readfirstlane_b32 s5, v0
+; GFX1064-NEXT:    s_mul_i32 s10, s8, s4
+; GFX1064-NEXT:    s_mul_hi_u32 s12, s8, s5
+; GFX1064-NEXT:    s_mul_i32 s11, s9, s5
+; GFX1064-NEXT:    s_add_i32 s10, s12, s10
+; GFX1064-NEXT:    s_mul_i32 s13, s8, s5
+; GFX1064-NEXT:    s_add_i32 s10, s10, s11
+; GFX1064-NEXT:    s_mul_hi_u32 s12, s5, s13
+; GFX1064-NEXT:    s_mul_i32 s15, s5, s10
+; GFX1064-NEXT:    s_mul_hi_u32 s14, s4, s13
+; GFX1064-NEXT:    s_mul_i32 s11, s4, s13
+; GFX1064-NEXT:    s_mul_hi_u32 s13, s5, s10
 ; GFX1064-NEXT:    s_add_u32 s12, s12, s15
 ; GFX1064-NEXT:    s_addc_u32 s13, 0, s13
-; GFX1064-NEXT:    s_mul_hi_u32 s16, s8, s5
+; GFX1064-NEXT:    s_mul_hi_u32 s16, s4, s10
 ; GFX1064-NEXT:    s_add_u32 s11, s12, s11
-; GFX1064-NEXT:    s_mul_i32 s5, s8, s5
+; GFX1064-NEXT:    s_mul_i32 s10, s4, s10
 ; GFX1064-NEXT:    s_addc_u32 s11, s13, s14
 ; GFX1064-NEXT:    s_addc_u32 s12, s16, 0
-; GFX1064-NEXT:    s_add_u32 s5, s11, s5
+; GFX1064-NEXT:    s_add_u32 s10, s11, s10
 ; GFX1064-NEXT:    s_addc_u32 s11, 0, s12
-; GFX1064-NEXT:    s_add_i32 s12, s4, s5
-; GFX1064-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX1064-NEXT:    s_mul_hi_u32 s13, s9, s12
-; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT:    s_mul_i32 s4, s9, s12
-; GFX1064-NEXT:    s_addc_u32 s8, s8, s11
-; GFX1064-NEXT:    s_mul_i32 s10, s10, s12
-; GFX1064-NEXT:    s_mul_i32 s9, s9, s8
-; GFX1064-NEXT:    s_mul_hi_u32 s5, s12, s4
-; GFX1064-NEXT:    s_add_i32 s9, s13, s9
-; GFX1064-NEXT:    s_mul_hi_u32 s11, s8, s4
-; GFX1064-NEXT:    s_add_i32 s9, s9, s10
-; GFX1064-NEXT:    s_mul_i32 s4, s8, s4
-; GFX1064-NEXT:    s_mul_i32 s14, s12, s9
-; GFX1064-NEXT:    s_mul_hi_u32 s13, s12, s9
-; GFX1064-NEXT:    s_add_u32 s5, s5, s14
+; GFX1064-NEXT:    s_add_i32 s5, s5, s10
+; GFX1064-NEXT:    s_addc_u32 s4, s4, s11
+; GFX1064-NEXT:    s_mul_hi_u32 s10, s8, s5
+; GFX1064-NEXT:    s_mul_i32 s11, s8, s5
+; GFX1064-NEXT:    s_mul_i32 s8, s8, s4
+; GFX1064-NEXT:    s_mul_i32 s9, s9, s5
+; GFX1064-NEXT:    s_add_i32 s8, s10, s8
+; GFX1064-NEXT:    s_mul_i32 s10, s4, s11
+; GFX1064-NEXT:    s_add_i32 s8, s8, s9
+; GFX1064-NEXT:    s_mul_hi_u32 s9, s5, s11
+; GFX1064-NEXT:    s_mul_i32 s14, s5, s8
+; GFX1064-NEXT:    s_mul_hi_u32 s13, s5, s8
+; GFX1064-NEXT:    s_add_u32 s9, s9, s14
+; GFX1064-NEXT:    s_mul_hi_u32 s12, s4, s11
 ; GFX1064-NEXT:    s_addc_u32 s13, 0, s13
-; GFX1064-NEXT:    s_mul_hi_u32 s10, s8, s9
-; GFX1064-NEXT:    s_add_u32 s4, s5, s4
-; GFX1064-NEXT:    s_mul_i32 s9, s8, s9
-; GFX1064-NEXT:    s_addc_u32 s4, s13, s11
-; GFX1064-NEXT:    s_addc_u32 s5, s10, 0
-; GFX1064-NEXT:    s_add_u32 s4, s4, s9
-; GFX1064-NEXT:    s_addc_u32 s9, 0, s5
-; GFX1064-NEXT:    s_add_i32 s12, s12, s4
-; GFX1064-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX1064-NEXT:    s_mul_hi_u32 s10, s2, s12
-; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT:    s_mul_hi_u32 s4, s3, s12
-; GFX1064-NEXT:    s_addc_u32 s5, s8, s9
-; GFX1064-NEXT:    s_mul_i32 s8, s3, s12
-; GFX1064-NEXT:    s_mul_i32 s11, s2, s5
-; GFX1064-NEXT:    s_mul_hi_u32 s9, s2, s5
-; GFX1064-NEXT:    s_add_u32 s10, s10, s11
-; GFX1064-NEXT:    s_addc_u32 s9, 0, s9
-; GFX1064-NEXT:    s_mul_hi_u32 s12, s3, s5
-; GFX1064-NEXT:    s_add_u32 s8, s10, s8
+; GFX1064-NEXT:    s_mul_hi_u32 s11, s4, s8
+; GFX1064-NEXT:    s_add_u32 s9, s9, s10
+; GFX1064-NEXT:    s_mul_i32 s8, s4, s8
+; GFX1064-NEXT:    s_addc_u32 s9, s13, s12
+; GFX1064-NEXT:    s_addc_u32 s10, s11, 0
+; GFX1064-NEXT:    s_add_u32 s8, s9, s8
+; GFX1064-NEXT:    s_addc_u32 s9, 0, s10
+; GFX1064-NEXT:    s_add_i32 s5, s5, s8
+; GFX1064-NEXT:    s_addc_u32 s4, s4, s9
+; GFX1064-NEXT:    s_mul_hi_u32 s8, s2, s5
+; GFX1064-NEXT:    s_mul_i32 s11, s2, s4
+; GFX1064-NEXT:    s_mul_hi_u32 s10, s2, s4
+; GFX1064-NEXT:    s_mul_hi_u32 s9, s3, s5
 ; GFX1064-NEXT:    s_mul_i32 s5, s3, s5
-; GFX1064-NEXT:    s_addc_u32 s4, s9, s4
+; GFX1064-NEXT:    s_add_u32 s8, s8, s11
+; GFX1064-NEXT:    s_addc_u32 s10, 0, s10
+; GFX1064-NEXT:    s_mul_hi_u32 s12, s3, s4
+; GFX1064-NEXT:    s_add_u32 s5, s8, s5
+; GFX1064-NEXT:    s_mul_i32 s4, s3, s4
+; GFX1064-NEXT:    s_addc_u32 s5, s10, s9
 ; GFX1064-NEXT:    s_addc_u32 s8, s12, 0
-; GFX1064-NEXT:    s_add_u32 s10, s4, s5
+; GFX1064-NEXT:    s_add_u32 s10, s5, s4
 ; GFX1064-NEXT:    s_addc_u32 s11, 0, s8
 ; GFX1064-NEXT:    s_mul_hi_u32 s4, s0, s10
 ; GFX1064-NEXT:    s_mul_i32 s5, s0, s11
 ; GFX1064-NEXT:    s_mul_i32 s8, s1, s10
 ; GFX1064-NEXT:    s_add_i32 s4, s4, s5
-; GFX1064-NEXT:    s_add_i32 s12, s4, s8
+; GFX1064-NEXT:    s_add_i32 s8, s4, s8
 ; GFX1064-NEXT:    s_mul_i32 s4, s0, s10
-; GFX1064-NEXT:    s_sub_i32 s8, s3, s12
-; GFX1064-NEXT:    s_sub_i32 s13, s2, s4
+; GFX1064-NEXT:    s_sub_i32 s9, s3, s8
+; GFX1064-NEXT:    s_sub_i32 s12, s2, s4
 ; GFX1064-NEXT:    s_cselect_b64 s[4:5], 1, 0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT:    s_subb_u32 s14, s8, s1
-; GFX1064-NEXT:    s_sub_i32 s15, s13, s0
-; GFX1064-NEXT:    s_cselect_b64 s[8:9], 1, 0
-; GFX1064-NEXT:    s_cmp_lg_u64 s[8:9], 0
-; GFX1064-NEXT:    s_subb_u32 s8, s14, 0
-; GFX1064-NEXT:    s_cmp_ge_u32 s8, s1
-; GFX1064-NEXT:    s_cselect_b32 s9, -1, 0
-; GFX1064-NEXT:    s_cmp_ge_u32 s15, s0
+; GFX1064-NEXT:    s_subb_u32 s9, s9, s1
+; GFX1064-NEXT:    s_sub_i32 s13, s12, s0
+; GFX1064-NEXT:    s_subb_u32 s9, s9, 0
+; GFX1064-NEXT:    s_cmp_ge_u32 s9, s1
 ; GFX1064-NEXT:    s_cselect_b32 s14, -1, 0
-; GFX1064-NEXT:    s_cmp_eq_u32 s8, s1
-; GFX1064-NEXT:    s_cselect_b32 s8, s14, s9
-; GFX1064-NEXT:    s_add_u32 s9, s10, 1
+; GFX1064-NEXT:    s_cmp_ge_u32 s13, s0
+; GFX1064-NEXT:    s_cselect_b32 s13, -1, 0
+; GFX1064-NEXT:    s_cmp_eq_u32 s9, s1
+; GFX1064-NEXT:    s_cselect_b32 s9, s13, s14
+; GFX1064-NEXT:    s_add_u32 s13, s10, 1
 ; GFX1064-NEXT:    s_addc_u32 s14, s11, 0
 ; GFX1064-NEXT:    s_add_u32 s15, s10, 2
 ; GFX1064-NEXT:    s_addc_u32 s16, s11, 0
-; GFX1064-NEXT:    s_cmp_lg_u32 s8, 0
-; GFX1064-NEXT:    s_cselect_b32 s15, s15, s9
+; GFX1064-NEXT:    s_cmp_lg_u32 s9, 0
+; GFX1064-NEXT:    s_cselect_b32 s13, s15, s13
 ; GFX1064-NEXT:    s_cselect_b32 s14, s16, s14
 ; GFX1064-NEXT:    s_cmp_lg_u64 s[4:5], 0
-; GFX1064-NEXT:    s_subb_u32 s3, s3, s12
+; GFX1064-NEXT:    s_subb_u32 s3, s3, s8
 ; GFX1064-NEXT:    s_cmp_ge_u32 s3, s1
 ; GFX1064-NEXT:    s_cselect_b32 s4, -1, 0
-; GFX1064-NEXT:    s_cmp_ge_u32 s13, s0
+; GFX1064-NEXT:    s_cmp_ge_u32 s12, s0
 ; GFX1064-NEXT:    s_cselect_b32 s5, -1, 0
 ; GFX1064-NEXT:    s_cmp_eq_u32 s3, s1
 ; GFX1064-NEXT:    s_cselect_b32 s1, s5, s4
 ; GFX1064-NEXT:    s_cmp_lg_u32 s1, 0
 ; GFX1064-NEXT:    s_cselect_b32 s5, s14, s11
-; GFX1064-NEXT:    s_cselect_b32 s4, s15, s10
+; GFX1064-NEXT:    s_cselect_b32 s4, s13, s10
 ; GFX1064-NEXT:    s_cbranch_execnz .LBB15_3
 ; GFX1064-NEXT:  .LBB15_2:
 ; GFX1064-NEXT:    v_cvt_f32_u32_e32 v0, s0

>From a837a9c3de168ea83df82ac81fb455c53216d55c Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Tue, 16 Sep 2025 09:26:42 -0500
Subject: [PATCH 9/9] Limit search.  Make code clearer.

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 31 +++++++++++++----------
 1 file changed, 18 insertions(+), 13 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index fbacccb5efd3a..caeec51ecde89 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -5978,7 +5978,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
     assert(WaveSize == 64 || WaveSize == 32);
 
-    unsigned SelOpc =
+    unsigned SelectOpc =
         (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
     unsigned AddcSubbOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
     unsigned AddSubOpc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
@@ -6001,22 +6001,27 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     //  dead S_CSELECT*.
 
     bool RecalculateSCC{true};
-    MachineInstr *Def = MRI.getVRegDef(Src2.getReg());
-    if (Def && Def->getParent() == BB && Def->getOpcode() == SelOpc &&
-        Def->getOperand(1).isImm() && Def->getOperand(1).getImm() != 0 &&
-        Def->getOperand(2).isImm() && Def->getOperand(2).getImm() == 0) {
-
-      auto I1 = std::next(MachineBasicBlock::reverse_iterator(Def));
+    MachineInstr *SelectDef = MRI.getVRegDef(Src2.getReg());
+    if (SelectDef && SelectDef->getParent() == BB &&
+        SelectDef->getOpcode() == SelectOpc &&
+        SelectDef->getOperand(1).isImm() &&
+        SelectDef->getOperand(1).getImm() != 0 &&
+        SelectDef->getOperand(2).isImm() &&
+        SelectDef->getOperand(2).getImm() == 0) {
+      auto I1 = std::next(MachineBasicBlock::reverse_iterator(SelectDef));
       if (I1 != BB->rend() &&
           (I1->getOpcode() == AddSubOpc || I1->getOpcode() == AddcSubbOpc)) {
-        RecalculateSCC = false;
-        // Ensure there are no intervening definitions of SCC.
+        // Ensure there are no intervening definitions of SCC between ADDs/SUBs
+        const unsigned SearchLimit = 6;
+        unsigned Count = 0;
         for (auto I2 = std::next(MachineBasicBlock::reverse_iterator(MI));
-             I2 != I1; I2++) {
-          if (I2->definesRegister(AMDGPU::SCC, TRI)) {
-            RecalculateSCC = true;
+             Count < SearchLimit; I2++, Count++) {
+          if (I2 == I1) {
+            RecalculateSCC = false;
             break;
           }
+          if (I2->definesRegister(AMDGPU::SCC, TRI))
+            break;
         }
       }
     }
@@ -6056,7 +6061,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
         .add(Src0)
         .add(Src1);
 
-    BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
+    BuildMI(*BB, MII, DL, TII->get(SelectOpc), CarryDest.getReg())
         .addImm(-1)
         .addImm(0);
 



More information about the llvm-commits mailing list