[llvm] fa630e7 - [RISCV][AMDGPU][TargetLowering] Special case overflow expansion for (uaddo X, 1).

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Apr 1 13:16:50 PDT 2022


Author: Craig Topper
Date: 2022-04-01T13:14:10-07:00
New Revision: fa630e7594b6b6e771ffda4ddffec38cf3256e29

URL: https://github.com/llvm/llvm-project/commit/fa630e7594b6b6e771ffda4ddffec38cf3256e29
DIFF: https://github.com/llvm/llvm-project/commit/fa630e7594b6b6e771ffda4ddffec38cf3256e29.diff

LOG: [RISCV][AMDGPU][TargetLowering] Special case overflow expansion for (uaddo X, 1).

If we expand (uaddo X, 1) we previously expanded the overflow calculation
as (X + 1) <u X. This potentially increases the live range of X and
can prevent X+1 from reusing the register that previously held X.

Since we're adding 1, overflow only occurs if X was UINT_MAX in which
case (X+1) would be 0. So this patch adds a special case to expand
the overflow calculation to (X+1) == 0.

This seems to help with uaddo intrinsics that get introduced by
CodeGenPrepare after LSR. Alternatively, we could block the uaddo
transform in CodeGenPrepare for this case.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D122933

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
    llvm/test/CodeGen/RISCV/xaluo.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 4190b13cd4968..1fbc8a59ba6bc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8723,8 +8723,18 @@ void TargetLowering::expandUADDSUBO(
   EVT ResultType = Node->getValueType(1);
   EVT SetCCType = getSetCCResultType(
       DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
-  ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
-  SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
+  SDValue SetCC;
+  if (IsAdd && isOneConstant(RHS)) {
+    // Special case: uaddo X, 1 overflowed if X+1 is 0. This potential reduces
+    // the live range of X. We assume comparing with 0 is cheap.
+    // TODO: This generalizes to (X + C) < C.
+    SetCC =
+        DAG.getSetCC(dl, SetCCType, Result,
+                     DAG.getConstant(0, dl, Node->getValueType(0)), ISD::SETEQ);
+  } else {
+    ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
+    SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
+  }
   Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
 }
 

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 1c3e889761f38..61f0eb131b3f5 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -6824,12 +6824,20 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
                       DAG.getValueType(MVT::i32));
 
-    // Sign extend the LHS and perform an unsigned compare with the ADDW result.
-    // Since the inputs are sign extended from i32, this is equivalent to
-    // comparing the lower 32 bits.
-    LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
-    SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
-                                    IsAdd ? ISD::SETULT : ISD::SETUGT);
+    SDValue Overflow;
+    if (IsAdd && isOneConstant(RHS)) {
+      // Special case uaddo X, 1 overflowed if the addition result is 0.
+      // FIXME: We can do this for any constant RHS by using (X + C) < C.
+      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res,
+                              DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ);
+    } else {
+      // Sign extend the LHS and perform an unsigned compare with the ADDW
+      // result. Since the inputs are sign extended from i32, this is equivalent
+      // to comparing the lower 32 bits.
+      LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
+      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
+                              IsAdd ? ISD::SETULT : ISD::SETUGT);
+    }
 
     Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
     Results.push_back(Overflow);

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 897faf871e4b7..daa60acbd60f9 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -149,86 +149,83 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_ashr_i32 s2, s9, 31
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[0:1], s[6:7]
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_sub_u32 s10, s6, s0
-; GCN-IR-NEXT:    s_subb_u32 s11, s7, s0
+; GCN-IR-NEXT:    s_sub_u32 s12, s6, s0
+; GCN-IR-NEXT:    s_subb_u32 s13, s7, s0
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[2:3], s[8:9]
 ; GCN-IR-NEXT:    s_sub_u32 s6, s6, s2
 ; GCN-IR-NEXT:    s_subb_u32 s7, s7, s2
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[12:13], s[14:15]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
-; GCN-IR-NEXT:    s_min_u32 s14, s12, s13
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s10
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s11
-; GCN-IR-NEXT:    s_min_u32 s16, s12, s13
-; GCN-IR-NEXT:    s_sub_u32 s12, s14, s16
-; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[12:13], 63
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
+; GCN-IR-NEXT:    s_min_u32 s14, s10, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s12
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s13
+; GCN-IR-NEXT:    s_min_u32 s18, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s14, s18
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[10:11], 63
 ; GCN-IR-NEXT:    s_mov_b32 s15, 0
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[20:21], s[12:13], 63
-; GCN-IR-NEXT:    s_xor_b64 s[22:23], s[18:19], -1
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[20:21]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[20:21], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[22:23], s[16:17], -1
 ; GCN-IR-NEXT:    s_and_b64 s[20:21], s[22:23], s[20:21]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s18, s12, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
-; GCN-IR-NEXT:    s_addc_u32 s19, s13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[10:11], s12
+; GCN-IR-NEXT:    s_add_u32 s16, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[16:17], 0
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s10
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[18:19], s[10:11], s18
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s16
 ; GCN-IR-NEXT:    s_add_u32 s20, s6, -1
 ; GCN-IR-NEXT:    s_addc_u32 s21, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s10, s8, s16
-; GCN-IR-NEXT:    s_mov_b32 s17, s15
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, s15
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s18
+; GCN-IR-NEXT:    s_mov_b32 s19, s15
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, s15
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[18:19], 1
-; GCN-IR-NEXT:    s_lshr_b32 s8, s13, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[16:17], 1
+; GCN-IR-NEXT:    s_lshr_b32 s8, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
 ; GCN-IR-NEXT:    s_sub_u32 s8, s20, s16
 ; GCN-IR-NEXT:    s_subb_u32 s8, s21, s17
 ; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
 ; GCN-IR-NEXT:    s_mov_b32 s15, s14
 ; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[18:19], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s18, s16, s18
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    s_subb_u32 s19, s17, s19
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s16, s16, s14
+; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow6
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[12:13], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-IR-NEXT:    s_branch .LBB0_6
 ; GCN-IR-NEXT:  .LBB0_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[18:19]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[18:19]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[16:17]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[16:17]
 ; GCN-IR-NEXT:  .LBB0_6: ; %udiv-end
 ; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
@@ -413,12 +410,11 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v7
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v8, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[14:15], v[7:8]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], 63, v7
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[14:15]
 ; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[11:12], v7
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
@@ -431,6 +427,7 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v11, vcc, v0, v13
 ; GCN-IR-NEXT:    v_addc_u32_e32 v12, vcc, v9, v16, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v16, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v17, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
@@ -438,23 +435,21 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[14:15], v[14:15], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 31, v8
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v14, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v9, vcc, v18, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v19, v15, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v7, v16, v7
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v11
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v17, v8
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v9
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v12, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v11, vcc, 1, v11
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v16, v7
 ; GCN-IR-NEXT:    v_and_b32_e32 v9, 1, v13
-; GCN-IR-NEXT:    v_and_b32_e32 v20, v13, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v16, v13, v3
 ; GCN-IR-NEXT:    v_and_b32_e32 v13, v13, v2
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[16:17], v[11:12]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v12, vcc, 0, v12, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[11:12]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v14, s[4:5], v0, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v17
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v17, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[4:5], v15, v16, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v17, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[4:5], v15, v20, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v16, v9
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1002,86 +997,83 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_ashr_i32 s4, s5, 31
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[2:3], s[6:7]
 ; GCN-IR-NEXT:    s_mov_b32 s5, s4
-; GCN-IR-NEXT:    s_sub_u32 s10, s6, s2
-; GCN-IR-NEXT:    s_subb_u32 s11, s7, s2
+; GCN-IR-NEXT:    s_sub_u32 s12, s6, s2
+; GCN-IR-NEXT:    s_subb_u32 s13, s7, s2
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], s[8:9]
 ; GCN-IR-NEXT:    s_sub_u32 s6, s6, s4
 ; GCN-IR-NEXT:    s_subb_u32 s7, s7, s4
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[12:13], s[14:15]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
-; GCN-IR-NEXT:    s_min_u32 s14, s12, s13
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s10
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s11
-; GCN-IR-NEXT:    s_min_u32 s16, s12, s13
-; GCN-IR-NEXT:    s_sub_u32 s12, s14, s16
-; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[12:13], 63
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
+; GCN-IR-NEXT:    s_min_u32 s14, s10, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s12
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s13
+; GCN-IR-NEXT:    s_min_u32 s18, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s14, s18
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[10:11], 63
 ; GCN-IR-NEXT:    s_mov_b32 s15, 0
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[20:21], s[12:13], 63
-; GCN-IR-NEXT:    s_xor_b64 s[22:23], s[18:19], -1
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[20:21]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[20:21], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[22:23], s[16:17], -1
 ; GCN-IR-NEXT:    s_and_b64 s[20:21], s[22:23], s[20:21]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s18, s12, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
-; GCN-IR-NEXT:    s_addc_u32 s19, s13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[18:19], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[10:11], s12
+; GCN-IR-NEXT:    s_add_u32 s16, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[16:17], 0
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s10
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[18:19], s[10:11], s18
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s16
 ; GCN-IR-NEXT:    s_add_u32 s20, s6, -1
 ; GCN-IR-NEXT:    s_addc_u32 s21, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s10, s8, s16
-; GCN-IR-NEXT:    s_mov_b32 s17, s15
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, s15
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s18
+; GCN-IR-NEXT:    s_mov_b32 s19, s15
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, s15
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[18:19], 1
-; GCN-IR-NEXT:    s_lshr_b32 s8, s13, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[16:17], 1
+; GCN-IR-NEXT:    s_lshr_b32 s8, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[14:15], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
 ; GCN-IR-NEXT:    s_sub_u32 s8, s20, s16
 ; GCN-IR-NEXT:    s_subb_u32 s8, s21, s17
 ; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
 ; GCN-IR-NEXT:    s_mov_b32 s15, s14
 ; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[18:19], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s18, s16, s18
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    s_subb_u32 s19, s17, s19
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s16, s16, s14
+; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_3
 ; GCN-IR-NEXT:  .LBB9_4: ; %Flow3
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[12:13], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-IR-NEXT:    s_branch .LBB9_6
 ; GCN-IR-NEXT:  .LBB9_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[18:19]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[18:19]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[16:17]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[16:17]
 ; GCN-IR-NEXT:  .LBB9_6: ; %udiv-end
 ; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[4:5], s[2:3]
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
@@ -1230,60 +1222,57 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
-; GCN-IR-NEXT:    s_min_u32 s8, s6, s7
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s10, s6, s7
+; GCN-IR-NEXT:    s_add_u32 s8, s10, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[8:9], 63
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[14:15], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[14:15], s[8:9], 63
 ; GCN-IR-NEXT:    s_xor_b64 s[16:17], s[12:13], -1
 ; GCN-IR-NEXT:    s_and_b64 s[14:15], s[16:17], s[14:15]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[12:13], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s9, 63, s10
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], 24, s9
+; GCN-IR-NEXT:    s_add_u32 s12, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], 24, s12
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s12
 ; GCN-IR-NEXT:    s_add_u32 s16, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_sub_u32 s10, 58, s10
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
-; GCN-IR-NEXT:    s_lshr_b32 s6, s11, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s6, s16, s14
-; GCN-IR-NEXT:    s_subb_u32 s6, s17, s15
-; GCN-IR-NEXT:    s_ashr_i32 s12, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s6, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[18:19], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s14, s14, s18
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_subb_u32 s15, s15, s19
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_lshr_b32 s6, s9, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s6, s16, s12
+; GCN-IR-NEXT:    s_subb_u32 s6, s17, s13
+; GCN-IR-NEXT:    s_ashr_i32 s14, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s15, s14
+; GCN-IR-NEXT:    s_and_b32 s6, s14, 1
+; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow5
-; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[10:11], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[2:3], s[6:7], s[2:3]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
@@ -1445,12 +1434,11 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[10:11], v[4:5]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], 24, v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
@@ -1461,6 +1449,7 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 58, v8
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
@@ -1473,18 +1462,16 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v16, v12, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v17, v12, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[12:13], v[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v12
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v17
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v13
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v16, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1644,12 +1631,11 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[10:11], v[4:5]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[8:9], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
@@ -1661,6 +1647,7 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 47, v8
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
@@ -1673,18 +1660,16 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v16, v12, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v17, v12, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[12:13], v[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v12
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v17
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v13
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v16, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1746,12 +1731,11 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v3
 ; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v4, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[9:10], v[3:4]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v3, s[4:5], 63, v3
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
 ; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[7:8], v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
@@ -1760,6 +1744,7 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 0xffffffcf, v0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v8, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
@@ -1768,20 +1753,18 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[9:10], v[9:10], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 31, v4
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v9, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[3:4], 1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v5, vcc, s12, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, 0, v10, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v11, vcc, 1, v7
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v7
+; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[3:4], 1
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v9, 31, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v12, vcc, 0, v8, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v8, vcc
 ; GCN-IR-NEXT:    v_and_b32_e32 v5, 1, v9
 ; GCN-IR-NEXT:    v_and_b32_e32 v9, 0x8000, v9
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[11:12], v[7:8]
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v11
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
 ; GCN-IR-NEXT:    v_sub_i32_e64 v9, s[4:5], v0, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v12
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
 ; GCN-IR-NEXT:    v_subbrev_u32_e64 v10, s[4:5], 0, v10, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]

diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index af1115d578643..b227e29b2e389 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -130,77 +130,74 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s4
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 32
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[10:11]
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s5
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    s_min_u32 s8, s12, s8
-; GCN-IR-NEXT:    s_min_u32 s12, s10, s11
-; GCN-IR-NEXT:    s_sub_u32 s10, s8, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[10:11], 63
-; GCN-IR-NEXT:    s_mov_b32 s9, 0
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[10:11], 63
-; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[14:15], -1
+; GCN-IR-NEXT:    s_min_u32 s10, s14, s8
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
+; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
+; GCN-IR-NEXT:    s_min_u32 s14, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s14
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[12:13], -1
 ; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s10, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    s_addc_u32 s15, s11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[2:3], s10
+; GCN-IR-NEXT:    s_add_u32 s12, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s14
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s12
 ; GCN-IR-NEXT:    s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s5, -1
-; GCN-IR-NEXT:    s_not_b64 s[6:7], s[8:9]
-; GCN-IR-NEXT:    s_mov_b32 s13, s9
-; GCN-IR-NEXT:    s_add_u32 s8, s6, s12
-; GCN-IR-NEXT:    s_addc_u32 s9, s7, s9
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_not_b64 s[6:7], s[10:11]
+; GCN-IR-NEXT:    s_mov_b32 s15, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s6, s14
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, s11
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
-; GCN-IR-NEXT:    s_lshr_b32 s6, s11, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s6, s16, s14
-; GCN-IR-NEXT:    s_subb_u32 s6, s17, s15
-; GCN-IR-NEXT:    s_ashr_i32 s12, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s6, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[18:19], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_sub_u32 s14, s14, s18
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_subb_u32 s15, s15, s19
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_lshr_b32 s6, s9, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s6, s16, s12
+; GCN-IR-NEXT:    s_subb_u32 s6, s17, s13
+; GCN-IR-NEXT:    s_ashr_i32 s14, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s15, s14
+; GCN-IR-NEXT:    s_and_b32 s6, s14, 1
+; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow6
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[10:11], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], s[8:9]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-IR-NEXT:    s_branch .LBB0_6
 ; GCN-IR-NEXT:  .LBB0_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
 ; GCN-IR-NEXT:  .LBB0_6: ; %udiv-end
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, s4, v1
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s4, v0
@@ -387,12 +384,11 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v7
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v8, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[14:15], v[7:8]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], 63, v7
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[14:15]
 ; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[0:1], v7
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
@@ -405,6 +401,7 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v11, vcc, v3, v12
 ; GCN-IR-NEXT:    v_mov_b32_e32 v16, 0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v12, vcc, v9, v13, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v17, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
@@ -412,23 +409,21 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[14:15], v[14:15], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v3, 31, v8
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v14, v3
-; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v9, vcc, v18, v3
+; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v19, v15, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v7, v16, v7
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v11
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v17, v8
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v9
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v12, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v11, vcc, 1, v11
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v16, v7
 ; GCN-IR-NEXT:    v_and_b32_e32 v9, 1, v13
-; GCN-IR-NEXT:    v_and_b32_e32 v20, v13, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v16, v13, v6
 ; GCN-IR-NEXT:    v_and_b32_e32 v13, v13, v5
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[16:17], v[11:12]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v12, vcc, 0, v12, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[11:12]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v14, s[4:5], v3, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v17
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v17, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[4:5], v15, v16, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v17, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[4:5], v15, v20, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v16, v9
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1020,78 +1015,75 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s8
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s2
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s9
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s3
-; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s14, s12, s13
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_mov_b32 s11, 0
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_xor_b64 s[20:21], s[16:17], -1
+; GCN-IR-NEXT:    s_min_u32 s12, s10, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
+; GCN-IR-NEXT:    s_min_u32 s16, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_mov_b32 s13, 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[20:21], s[14:15], -1
 ; GCN-IR-NEXT:    s_and_b64 s[18:19], s[20:21], s[18:19]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
-; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[16:17], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[2:3], s12
+; GCN-IR-NEXT:    s_add_u32 s14, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 0
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[2:3], s10
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[2:3], s16
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s14
 ; GCN-IR-NEXT:    s_add_u32 s18, s8, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s9, -1
-; GCN-IR-NEXT:    s_not_b64 s[6:7], s[10:11]
-; GCN-IR-NEXT:    s_mov_b32 s15, s11
-; GCN-IR-NEXT:    s_add_u32 s10, s6, s14
-; GCN-IR-NEXT:    s_addc_u32 s11, s7, s11
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_not_b64 s[6:7], s[12:13]
+; GCN-IR-NEXT:    s_mov_b32 s17, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s6, s16
+; GCN-IR-NEXT:    s_addc_u32 s13, s7, s13
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[16:17], 1
-; GCN-IR-NEXT:    s_lshr_b32 s6, s13, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[14:15], s[12:13]
-; GCN-IR-NEXT:    s_sub_u32 s6, s18, s16
-; GCN-IR-NEXT:    s_subb_u32 s6, s19, s17
-; GCN-IR-NEXT:    s_ashr_i32 s14, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s6, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[20:21], s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s16, s16, s20
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    s_subb_u32 s17, s17, s21
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_lshr_b32 s6, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[16:17], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s6, s18, s14
+; GCN-IR-NEXT:    s_subb_u32 s6, s19, s15
+; GCN-IR-NEXT:    s_ashr_i32 s16, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s17, s16
+; GCN-IR-NEXT:    s_and_b32 s6, s16, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[16:17], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[6:7]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_3
 ; GCN-IR-NEXT:  .LBB8_4: ; %Flow6
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-IR-NEXT:    s_branch .LBB8_6
 ; GCN-IR-NEXT:  .LBB8_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[16:17]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[16:17]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
 ; GCN-IR-NEXT:  .LBB8_6: ; %udiv-end
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, s8, v1
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s8, v0
@@ -1181,78 +1173,75 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[4:5], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s4
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s5
-; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s14, s12, s13
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_mov_b32 s11, 0
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_xor_b64 s[20:21], s[16:17], -1
+; GCN-IR-NEXT:    s_min_u32 s12, s10, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s4
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s5
+; GCN-IR-NEXT:    s_min_u32 s16, s10, s11
+; GCN-IR-NEXT:    s_sub_u32 s10, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_mov_b32 s13, 0
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[18:19], s[10:11], 63
+; GCN-IR-NEXT:    s_xor_b64 s[20:21], s[14:15], -1
 ; GCN-IR-NEXT:    s_and_b64 s[18:19], s[20:21], s[18:19]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s12
-; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s13
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[16:17], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[4:5], s12
+; GCN-IR-NEXT:    s_add_u32 s14, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 0
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[4:5], s10
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[4:5], s16
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[4:5], s14
 ; GCN-IR-NEXT:    s_add_u32 s18, s6, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s7, -1
-; GCN-IR-NEXT:    s_not_b64 s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_mov_b32 s15, s11
-; GCN-IR-NEXT:    s_add_u32 s10, s8, s14
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, s11
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_not_b64 s[8:9], s[12:13]
+; GCN-IR-NEXT:    s_mov_b32 s17, s13
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s16
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, s13
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[16:17], s[16:17], 1
-; GCN-IR-NEXT:    s_lshr_b32 s8, s13, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[14:15], s[12:13]
-; GCN-IR-NEXT:    s_sub_u32 s8, s18, s16
-; GCN-IR-NEXT:    s_subb_u32 s8, s19, s17
-; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[20:21], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s16, s16, s20
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    s_subb_u32 s17, s17, s21
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
+; GCN-IR-NEXT:    s_lshr_b32 s8, s11, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[16:17], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s8, s18, s14
+; GCN-IR-NEXT:    s_subb_u32 s8, s19, s15
+; GCN-IR-NEXT:    s_ashr_i32 s16, s8, 31
+; GCN-IR-NEXT:    s_mov_b32 s17, s16
+; GCN-IR-NEXT:    s_and_b32 s8, s16, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[16:17], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
+; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[8:9]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_3
 ; GCN-IR-NEXT:  .LBB9_4: ; %Flow3
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
 ; GCN-IR-NEXT:    s_branch .LBB9_6
 ; GCN-IR-NEXT:  .LBB9_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s5
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[16:17]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[16:17]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
 ; GCN-IR-NEXT:  .LBB9_6: ; %udiv-end
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, s6, v1
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
@@ -1403,60 +1392,57 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s4
 ; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s5
-; GCN-IR-NEXT:    s_min_u32 s6, s2, s3
-; GCN-IR-NEXT:    s_add_u32 s8, s6, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s8, s2, s3
+; GCN-IR-NEXT:    s_add_u32 s6, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[4:5], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
 ; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s7, 63, s8
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s7
+; GCN-IR-NEXT:    s_add_u32 s10, s6, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s6
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s10
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT:    s_add_u32 s14, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s5, -1
-; GCN-IR-NEXT:    s_sub_u32 s6, 58, s6
-; GCN-IR-NEXT:    s_subb_u32 s7, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
-; GCN-IR-NEXT:    s_lshr_b32 s2, s9, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s2, s14, s12
-; GCN-IR-NEXT:    s_subb_u32 s2, s15, s13
-; GCN-IR-NEXT:    s_ashr_i32 s10, s2, 31
-; GCN-IR-NEXT:    s_mov_b32 s11, s10
-; GCN-IR-NEXT:    s_and_b32 s2, s10, 1
-; GCN-IR-NEXT:    s_and_b64 s[16:17], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s16
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s17
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-IR-NEXT:    s_add_u32 s6, s6, 1
-; GCN-IR-NEXT:    s_addc_u32 s7, s7, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[2:3]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshr_b32 s2, s7, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s2, s14, s10
+; GCN-IR-NEXT:    s_subb_u32 s2, s15, s11
+; GCN-IR-NEXT:    s_ashr_i32 s12, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s2, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow5
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[2:3], s[2:3], s[6:7]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
@@ -1616,11 +1602,10 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v3
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v4, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v3
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[8:9], v[3:4]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
@@ -1631,6 +1616,7 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
@@ -1643,18 +1629,16 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v14, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v15, v10, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[10:11], v[6:7]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v10
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v15
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v11
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v14, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1812,12 +1796,11 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[8:9], v[2:3]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
@@ -1829,6 +1812,7 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
@@ -1841,18 +1825,16 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v14, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v15, v10, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[10:11], v[6:7]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v10
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v15
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v11
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v14, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1920,12 +1902,11 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[9:10], v[4:5]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
@@ -1934,6 +1915,7 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffcf, v8
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
@@ -1942,20 +1924,18 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, s12, v10
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
 ; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v14, 0x8000, v12
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v12, 0x8000, v12
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[12:13], v[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v12
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v14
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v13
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
 ; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
 ; GCN-IR-NEXT:    v_subbrev_u32_e64 v11, s[4:5], 0, v11, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]

diff  --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index c23aff6d25fd6..20fa20a3d0735 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -131,47 +131,45 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s4
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 32
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[10:11]
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s5
-; GCN-IR-NEXT:    s_min_u32 s10, s12, s8
+; GCN-IR-NEXT:    s_min_u32 s10, s14, s8
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
-; GCN-IR-NEXT:    s_min_u32 s12, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s8, s10, s12
+; GCN-IR-NEXT:    s_min_u32 s14, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s14
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[8:9], 63
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[8:9], 63
-; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[14:15], -1
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[12:13], -1
 ; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s8, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_addc_u32 s15, s9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1]
+; GCN-IR-NEXT:    s_add_u32 s12, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s14
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s12
 ; GCN-IR-NEXT:    s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s5, -1
 ; GCN-IR-NEXT:    s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s2, s2, s12
-; GCN-IR-NEXT:    s_mov_b32 s13, s11
+; GCN-IR-NEXT:    s_add_u32 s2, s2, s14
+; GCN-IR-NEXT:    s_mov_b32 s15, s11
 ; GCN-IR-NEXT:    s_addc_u32 s3, s3, s11
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[14:15], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
 ; GCN-IR-NEXT:    s_lshr_b32 s6, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
@@ -181,15 +179,14 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_ashr_i32 s10, s6, 31
 ; GCN-IR-NEXT:    s_mov_b32 s11, s10
 ; GCN-IR-NEXT:    s_and_b32 s6, s10, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s14
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-IR-NEXT:    s_subb_u32 s15, s13, s15
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
 ; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
 ; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[2:3], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow6
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[8:9], 1
@@ -199,9 +196,9 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_branch .LBB0_6
 ; GCN-IR-NEXT:  .LBB0_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
 ; GCN-IR-NEXT:  .LBB0_6: ; %udiv-end
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
@@ -354,11 +351,10 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v6
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v7, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[12:13], v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[12:13]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
@@ -371,6 +367,7 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
@@ -383,18 +380,16 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v4
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v6
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v8, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v8, v2
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[8:9], v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, v8
-; GCN-IR-NEXT:    v_sub_i32_e64 v12, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, v9
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v12, s[4:5], v10, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[4:5], v11, v9, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, v7
-; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[4:5], v11, v13, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -813,83 +808,80 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_and_b32 s0, s3, s2
 ; GCN-IR-NEXT:    s_and_b32 s3, s7, s8
 ; GCN-IR-NEXT:    s_and_b32 s2, s6, s2
-; GCN-IR-NEXT:    s_lshr_b64 s[6:7], s[0:1], 24
+; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[0:1], 24
 ; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], 24
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[0:1], 0
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
-; GCN-IR-NEXT:    s_min_u32 s10, s8, s9
-; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
-; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
-; GCN-IR-NEXT:    s_min_u32 s12, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s8, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[6:7], s[10:11]
+; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
+; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
+; GCN-IR-NEXT:    s_min_u32 s10, s6, s7
+; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s8
+; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s9
+; GCN-IR-NEXT:    s_min_u32 s14, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s6, s10, s14
+; GCN-IR-NEXT:    s_subb_u32 s7, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[6:7], 63
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[8:9], 63
-; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[14:15], -1
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[6:7], 63
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[12:13], -1
 ; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s8, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_addc_u32 s15, s9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[6:7], s8
+; GCN-IR-NEXT:    s_add_u32 s12, s6, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s7, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[8:9], s6
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[6:7], s14
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[8:9], s12
 ; GCN-IR-NEXT:    s_add_u32 s16, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s3, -1
 ; GCN-IR-NEXT:    s_not_b64 s[0:1], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s6, s0, s12
-; GCN-IR-NEXT:    s_mov_b32 s13, s11
-; GCN-IR-NEXT:    s_addc_u32 s7, s1, s11
+; GCN-IR-NEXT:    s_add_u32 s8, s0, s14
+; GCN-IR-NEXT:    s_mov_b32 s15, s11
+; GCN-IR-NEXT:    s_addc_u32 s9, s1, s11
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s1, 0
 ; GCN-IR-NEXT:  .LBB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[14:15], 1
-; GCN-IR-NEXT:    s_lshr_b32 s0, s9, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_lshr_b32 s0, s7, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
 ; GCN-IR-NEXT:    s_sub_u32 s0, s16, s12
 ; GCN-IR-NEXT:    s_subb_u32 s0, s17, s13
 ; GCN-IR-NEXT:    s_ashr_i32 s10, s0, 31
 ; GCN-IR-NEXT:    s_mov_b32 s11, s10
 ; GCN-IR-NEXT:    s_and_b32 s0, s10, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[10:11], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s14
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    s_subb_u32 s15, s13, s15
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-IR-NEXT:    s_add_u32 s6, s6, 1
-; GCN-IR-NEXT:    s_addc_u32 s7, s7, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[0:1]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_3
 ; GCN-IR-NEXT:  .LBB7_4: ; %Flow3
-; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[2:3]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s1
 ; GCN-IR-NEXT:    s_branch .LBB7_6
 ; GCN-IR-NEXT:  .LBB7_5:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
 ; GCN-IR-NEXT:  .LBB7_6: ; %udiv-end
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
@@ -1017,60 +1009,57 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
 ; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
-; GCN-IR-NEXT:    s_min_u32 s6, s4, s5
-; GCN-IR-NEXT:    s_add_u32 s8, s6, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s8, s4, s5
+; GCN-IR-NEXT:    s_add_u32 s6, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
 ; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s7, 63, s8
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s7
+; GCN-IR-NEXT:    s_add_u32 s10, s6, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s6
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s10
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s6, 58, s6
-; GCN-IR-NEXT:    s_subb_u32 s7, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
-; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
-; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s11, s10
-; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
-; GCN-IR-NEXT:    s_and_b64 s[16:17], s[10:11], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s16
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s17
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-IR-NEXT:    s_add_u32 s6, s6, 1
-; GCN-IR-NEXT:    s_addc_u32 s7, s7, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, s14, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, s15, s11
+; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_3
 ; GCN-IR-NEXT:  .LBB8_4: ; %Flow5
-; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[2:3], s[4:5], s[2:3]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
@@ -1215,11 +1204,10 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[8:9], v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
@@ -1231,6 +1219,7 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
@@ -1243,18 +1232,16 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v14, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v15, v10, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[10:11], v[6:7]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v10
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v15
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v11
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v14, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1306,11 +1293,10 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[7:8], v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
@@ -1319,6 +1305,7 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
@@ -1327,20 +1314,18 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v7, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
-; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v7, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v7
 ; GCN-IR-NEXT:    v_and_b32_e32 v7, 0x8000, v7
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[9:10], v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, v9
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
 ; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], v6, v7
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, v5
 ; GCN-IR-NEXT:    v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
@@ -1476,54 +1461,51 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
 ; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
-; GCN-IR-NEXT:    s_min_u32 s8, s4, s5
-; GCN-IR-NEXT:    s_sub_u32 s6, 59, s8
+; GCN-IR-NEXT:    s_min_u32 s10, s4, s5
+; GCN-IR-NEXT:    s_sub_u32 s6, 59, s10
 ; GCN-IR-NEXT:    s_subb_u32 s7, 0, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
-; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
+; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[8:9], -1
 ; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s6, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
+; GCN-IR-NEXT:    s_add_u32 s8, s6, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s7, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 0
 ; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s6
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s2, s8, 0xffffffc4
+; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s8
+; GCN-IR-NEXT:    s_add_u32 s2, s10, 0xffffffc4
 ; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, 23, s10
-; GCN-IR-NEXT:    s_subb_u32 s4, 0, s11
-; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
-; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
-; GCN-IR-NEXT:    s_and_b32 s8, s8, 24
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s8
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s3
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s8
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s9
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b32 s10, s10, 24
+; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
+; GCN-IR-NEXT:    s_subb_u32 s9, s9, 0
 ; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
 ; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_3
 ; GCN-IR-NEXT:  .LBB11_4: ; %Flow5
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[6:7], 1
@@ -1533,9 +1515,9 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_branch .LBB11_6
 ; GCN-IR-NEXT:  .LBB11_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[10:11]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[8:9]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[10:11]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[8:9]
 ; GCN-IR-NEXT:  .LBB11_6: ; %udiv-end
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s2, -1
@@ -1668,11 +1650,10 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[7:8], v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
@@ -1681,6 +1662,7 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
@@ -1688,20 +1670,18 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v7, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
-; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v7, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v7
 ; GCN-IR-NEXT:    v_and_b32_e32 v7, 24, v7
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[9:10], v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, v9
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
 ; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], v6, v7
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, v5
 ; GCN-IR-NEXT:    v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]

diff  --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index e609f57194c43..ef6e67f64a26e 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -130,77 +130,74 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s4
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[10:11]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_add_i32 s14, s12, 32
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[10:11]
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s5
-; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    s_min_u32 s8, s12, s8
-; GCN-IR-NEXT:    s_min_u32 s12, s10, s11
-; GCN-IR-NEXT:    s_sub_u32 s10, s8, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[10:11], 63
-; GCN-IR-NEXT:    s_mov_b32 s9, 0
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[10:11], 63
-; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[14:15], -1
+; GCN-IR-NEXT:    s_min_u32 s10, s14, s8
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
+; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
+; GCN-IR-NEXT:    s_min_u32 s14, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s8, s10, s14
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT:    s_xor_b64 s[18:19], s[12:13], -1
 ; GCN-IR-NEXT:    s_and_b64 s[16:17], s[18:19], s[16:17]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s10, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    s_addc_u32 s15, s11, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s11
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[14:15], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[2:3], s10
+; GCN-IR-NEXT:    s_add_u32 s12, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s14
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s12
 ; GCN-IR-NEXT:    s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s5, -1
-; GCN-IR-NEXT:    s_not_b64 s[6:7], s[8:9]
-; GCN-IR-NEXT:    s_mov_b32 s13, s9
-; GCN-IR-NEXT:    s_add_u32 s8, s6, s12
-; GCN-IR-NEXT:    s_addc_u32 s9, s7, s9
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_not_b64 s[6:7], s[10:11]
+; GCN-IR-NEXT:    s_mov_b32 s15, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s6, s14
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, s11
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[14:15], s[14:15], 1
-; GCN-IR-NEXT:    s_lshr_b32 s6, s11, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s6, s16, s14
-; GCN-IR-NEXT:    s_subb_u32 s6, s17, s15
-; GCN-IR-NEXT:    s_ashr_i32 s12, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s6, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[18:19], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_sub_u32 s14, s14, s18
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_subb_u32 s15, s15, s19
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
+; GCN-IR-NEXT:    s_lshr_b32 s6, s9, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s6, s16, s12
+; GCN-IR-NEXT:    s_subb_u32 s6, s17, s13
+; GCN-IR-NEXT:    s_ashr_i32 s14, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s15, s14
+; GCN-IR-NEXT:    s_and_b32 s6, s14, 1
+; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
+; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow6
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[10:11], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], s[8:9]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
 ; GCN-IR-NEXT:    s_branch .LBB0_6
 ; GCN-IR-NEXT:  .LBB0_5:
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[14:15]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
 ; GCN-IR-NEXT:  .LBB0_6: ; %udiv-end
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, s4, v1
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s4, v0
@@ -363,11 +360,10 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v5
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v5
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[12:13], v[5:6]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[12:13]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
@@ -380,6 +376,7 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_lshr_b64 v[12:13], v[0:1], v12
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, v6, v11, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
@@ -392,18 +389,16 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v13, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v16, v10, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v17, v10, v2
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[10:11], v[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v10
-; GCN-IR-NEXT:    v_sub_i32_e64 v12, s[4:5], v12, v17
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v11
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v12, s[4:5], v12, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[4:5], v13, v11, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[4:5], v13, v16, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -835,60 +830,57 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
 ; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
-; GCN-IR-NEXT:    s_min_u32 s6, s4, s5
-; GCN-IR-NEXT:    s_add_u32 s8, s6, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s8, s4, s5
+; GCN-IR-NEXT:    s_add_u32 s6, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
 ; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s7, 63, s8
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s7
+; GCN-IR-NEXT:    s_add_u32 s10, s6, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s6
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s10
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s6, 58, s6
-; GCN-IR-NEXT:    s_subb_u32 s7, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB6_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
-; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
-; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s11, s10
-; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
-; GCN-IR-NEXT:    s_and_b64 s[16:17], s[10:11], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s16
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s17
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-IR-NEXT:    s_add_u32 s6, s6, 1
-; GCN-IR-NEXT:    s_addc_u32 s7, s7, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, s14, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, s15, s11
+; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_3
 ; GCN-IR-NEXT:  .LBB6_4: ; %Flow5
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s5
@@ -1027,57 +1019,54 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s3
 ; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
-; GCN-IR-NEXT:    s_min_u32 s6, s4, s5
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s6
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT:    s_min_u32 s8, s4, s5
+; GCN-IR-NEXT:    s_sub_u32 s6, 59, s8
+; GCN-IR-NEXT:    s_subb_u32 s7, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[12:13], s[6:7], 63
 ; GCN-IR-NEXT:    s_xor_b64 s[14:15], s[10:11], -1
 ; GCN-IR-NEXT:    s_and_b64 s[12:13], s[14:15], s[12:13]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s9
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[10:11], v[0:1]
-; GCN-IR-NEXT:    s_sub_i32 s7, 63, s8
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, vcc
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s7
+; GCN-IR-NEXT:    s_add_u32 s10, s6, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT:    s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s6
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s6, s6, 0xffffffc4
-; GCN-IR-NEXT:    s_addc_u32 s7, 0, -1
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 0xffffffc4
+; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[12:13], s[12:13], 1
-; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, 23, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, 0, s13
-; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
-; GCN-IR-NEXT:    s_and_b32 s10, s10, 24
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s10
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s7
-; GCN-IR-NEXT:    s_add_u32 s6, s6, 1
-; GCN-IR-NEXT:    s_addc_u32 s7, s7, 0
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s11
+; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
+; GCN-IR-NEXT:    s_and_b32 s12, s12, 24
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, 0
+; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_3
 ; GCN-IR-NEXT:  .LBB7_4: ; %Flow5
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v0, s4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s5
@@ -1232,12 +1221,11 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[8:9], v[2:3]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_5
@@ -1249,6 +1237,7 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
@@ -1261,18 +1250,16 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v14, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v15, v10, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[10:11], v[6:7]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v10
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v15
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v11
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v14, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
@@ -1329,12 +1316,11 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    v_cmp_ge_u64_e32 vcc, v[7:8], v[2:3]
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
@@ -1343,6 +1329,7 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
@@ -1351,20 +1338,18 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
 ; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v12, 0x8000, v10
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, v[10:11], v[6:7]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v10
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v11
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
 ; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]

diff  --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 6dcb7f7a6b793..c6585d01ee4d4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -288,12 +288,11 @@ define void @sink_splat_mul_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB7_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    mulw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    mulw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB7_6
+; CHECK-NEXT:    bnez a2, .LBB7_6
 ; CHECK-NEXT:  .LBB7_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -381,12 +380,11 @@ define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB8_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    addw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    addw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB8_6
+; CHECK-NEXT:    bnez a2, .LBB8_6
 ; CHECK-NEXT:  .LBB8_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -474,12 +472,11 @@ define void @sink_splat_sub_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB9_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    addw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    addw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB9_6
+; CHECK-NEXT:    bnez a2, .LBB9_6
 ; CHECK-NEXT:  .LBB9_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -567,12 +564,11 @@ define void @sink_splat_rsub_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB10_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    subw a2, a1, a3
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    subw a3, a1, a3
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB10_6
+; CHECK-NEXT:    bnez a2, .LBB10_6
 ; CHECK-NEXT:  .LBB10_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -660,12 +656,11 @@ define void @sink_splat_and_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB11_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    and a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    and a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB11_6
+; CHECK-NEXT:    bnez a2, .LBB11_6
 ; CHECK-NEXT:  .LBB11_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -753,12 +748,11 @@ define void @sink_splat_or_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB12_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    or a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    or a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB12_6
+; CHECK-NEXT:    bnez a2, .LBB12_6
 ; CHECK-NEXT:  .LBB12_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -846,12 +840,11 @@ define void @sink_splat_xor_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB13_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    xor a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    xor a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB13_6
+; CHECK-NEXT:    bnez a2, .LBB13_6
 ; CHECK-NEXT:  .LBB13_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1047,12 +1040,11 @@ define void @sink_splat_shl_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB17_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    sllw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    sllw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB17_6
+; CHECK-NEXT:    bnez a2, .LBB17_6
 ; CHECK-NEXT:  .LBB17_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1140,12 +1132,11 @@ define void @sink_splat_lshr_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB18_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    srlw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    srlw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB18_6
+; CHECK-NEXT:    bnez a2, .LBB18_6
 ; CHECK-NEXT:  .LBB18_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1233,12 +1224,11 @@ define void @sink_splat_ashr_scalable(i32* nocapture %a) {
 ; CHECK-NEXT:  .LBB19_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a2, 0(a0)
-; CHECK-NEXT:    mv a3, a1
-; CHECK-NEXT:    srli a1, a2, 2
-; CHECK-NEXT:    sw a1, 0(a0)
-; CHECK-NEXT:    addi a1, a3, 1
+; CHECK-NEXT:    srli a2, a2, 2
+; CHECK-NEXT:    sw a2, 0(a0)
+; CHECK-NEXT:    addi a1, a1, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a1, a3, .LBB19_6
+; CHECK-NEXT:    bnez a1, .LBB19_6
 ; CHECK-NEXT:  .LBB19_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1541,12 +1531,11 @@ define void @sink_splat_fmul_scalable(float* nocapture %a, float %x) {
 ; CHECK-NEXT:  .LBB26_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:    fmul.s ft0, ft0, fa0
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a1, a1, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a1, a2, .LBB26_6
+; CHECK-NEXT:    bnez a1, .LBB26_6
 ; CHECK-NEXT:  .LBB26_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1633,12 +1622,11 @@ define void @sink_splat_fdiv_scalable(float* nocapture %a, float %x) {
 ; CHECK-NEXT:  .LBB27_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:    fdiv.s ft0, ft0, fa0
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a1, a1, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a1, a2, .LBB27_6
+; CHECK-NEXT:    bnez a1, .LBB27_6
 ; CHECK-NEXT:  .LBB27_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1725,12 +1713,11 @@ define void @sink_splat_frdiv_scalable(float* nocapture %a, float %x) {
 ; CHECK-NEXT:  .LBB28_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:    fdiv.s ft0, fa0, ft0
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a1, a1, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a1, a2, .LBB28_6
+; CHECK-NEXT:    bnez a1, .LBB28_6
 ; CHECK-NEXT:  .LBB28_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1817,12 +1804,11 @@ define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
 ; CHECK-NEXT:  .LBB29_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:    fadd.s ft0, ft0, fa0
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a1, a1, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a1, a2, .LBB29_6
+; CHECK-NEXT:    bnez a1, .LBB29_6
 ; CHECK-NEXT:  .LBB29_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -1909,12 +1895,11 @@ define void @sink_splat_fsub_scalable(float* nocapture %a, float %x) {
 ; CHECK-NEXT:  .LBB30_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:    fsub.s ft0, ft0, fa0
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a1, a1, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a1, a2, .LBB30_6
+; CHECK-NEXT:    bnez a1, .LBB30_6
 ; CHECK-NEXT:  .LBB30_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -2001,12 +1986,11 @@ define void @sink_splat_frsub_scalable(float* nocapture %a, float %x) {
 ; CHECK-NEXT:  .LBB31_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
-; CHECK-NEXT:    mv a2, a1
 ; CHECK-NEXT:    fsub.s ft0, fa0, ft0
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a1, a1, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a1, a2, .LBB31_6
+; CHECK-NEXT:    bnez a1, .LBB31_6
 ; CHECK-NEXT:  .LBB31_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -2180,13 +2164,12 @@ define void @sink_splat_fma_scalable(float* noalias nocapture %a, float* noalias
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
 ; CHECK-NEXT:    flw ft1, 0(a1)
-; CHECK-NEXT:    mv a3, a2
 ; CHECK-NEXT:    fmadd.s ft0, ft0, fa0, ft1
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a1, a1, 4
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a3, .LBB34_6
+; CHECK-NEXT:    bnez a2, .LBB34_6
 ; CHECK-NEXT:  .LBB34_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -2283,13 +2266,12 @@ define void @sink_splat_fma_commute_scalable(float* noalias nocapture %a, float*
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    flw ft0, 0(a0)
 ; CHECK-NEXT:    flw ft1, 0(a1)
-; CHECK-NEXT:    mv a3, a2
 ; CHECK-NEXT:    fmadd.s ft0, fa0, ft0, ft1
 ; CHECK-NEXT:    fsw ft0, 0(a0)
 ; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a1, a1, 4
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a3, .LBB35_6
+; CHECK-NEXT:    bnez a2, .LBB35_6
 ; CHECK-NEXT:  .LBB35_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -2607,12 +2589,11 @@ define void @sink_splat_udiv_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB42_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    divuw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    divuw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB42_6
+; CHECK-NEXT:    bnez a2, .LBB42_6
 ; CHECK-NEXT:  .LBB42_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -2700,12 +2681,11 @@ define void @sink_splat_sdiv_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB43_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    divw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    divw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB43_6
+; CHECK-NEXT:    bnez a2, .LBB43_6
 ; CHECK-NEXT:  .LBB43_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -2793,12 +2773,11 @@ define void @sink_splat_urem_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB44_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    remuw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    remuw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB44_6
+; CHECK-NEXT:    bnez a2, .LBB44_6
 ; CHECK-NEXT:  .LBB44_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:
@@ -2886,12 +2865,11 @@ define void @sink_splat_srem_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:  .LBB45_6: # %for.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
-; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    remw a2, a3, a1
-; CHECK-NEXT:    sw a2, 0(a0)
-; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    remw a3, a3, a1
+; CHECK-NEXT:    sw a3, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
 ; CHECK-NEXT:    addi a0, a0, 4
-; CHECK-NEXT:    bgeu a2, a4, .LBB45_6
+; CHECK-NEXT:    bnez a2, .LBB45_6
 ; CHECK-NEXT:  .LBB45_7: # %for.cond.cleanup
 ; CHECK-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/xaluo.ll b/llvm/test/CodeGen/RISCV/xaluo.ll
index 473ea4040bf84..3d955098f564b 100644
--- a/llvm/test/CodeGen/RISCV/xaluo.ll
+++ b/llvm/test/CodeGen/RISCV/xaluo.ll
@@ -428,31 +428,29 @@ define zeroext i1 @uaddo.i32.constant_one(i32 %v1, i32* %res) {
 ; RV32-LABEL: uaddo.i32.constant_one:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi a2, a0, 1
-; RV32-NEXT:    sltu a0, a2, a0
+; RV32-NEXT:    seqz a0, a2
 ; RV32-NEXT:    sw a2, 0(a1)
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: uaddo.i32.constant_one:
 ; RV64:       # %bb.0: # %entry
-; RV64-NEXT:    sext.w a2, a0
-; RV64-NEXT:    addiw a3, a0, 1
-; RV64-NEXT:    sltu a0, a3, a2
-; RV64-NEXT:    sw a3, 0(a1)
+; RV64-NEXT:    addiw a2, a0, 1
+; RV64-NEXT:    seqz a0, a2
+; RV64-NEXT:    sw a2, 0(a1)
 ; RV64-NEXT:    ret
 ;
 ; RV32ZBA-LABEL: uaddo.i32.constant_one:
 ; RV32ZBA:       # %bb.0: # %entry
 ; RV32ZBA-NEXT:    addi a2, a0, 1
-; RV32ZBA-NEXT:    sltu a0, a2, a0
+; RV32ZBA-NEXT:    seqz a0, a2
 ; RV32ZBA-NEXT:    sw a2, 0(a1)
 ; RV32ZBA-NEXT:    ret
 ;
 ; RV64ZBA-LABEL: uaddo.i32.constant_one:
 ; RV64ZBA:       # %bb.0: # %entry
-; RV64ZBA-NEXT:    sext.w a2, a0
-; RV64ZBA-NEXT:    addiw a3, a0, 1
-; RV64ZBA-NEXT:    sltu a0, a3, a2
-; RV64ZBA-NEXT:    sw a3, 0(a1)
+; RV64ZBA-NEXT:    addiw a2, a0, 1
+; RV64ZBA-NEXT:    seqz a0, a2
+; RV64ZBA-NEXT:    sw a2, 0(a1)
 ; RV64ZBA-NEXT:    ret
 entry:
   %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %v1, i32 1)
@@ -530,7 +528,7 @@ define zeroext i1 @uaddo.i64.constant_one(i64 %v1, i64* %res) {
 ; RV64-LABEL: uaddo.i64.constant_one:
 ; RV64:       # %bb.0: # %entry
 ; RV64-NEXT:    addi a2, a0, 1
-; RV64-NEXT:    sltu a0, a2, a0
+; RV64-NEXT:    seqz a0, a2
 ; RV64-NEXT:    sd a2, 0(a1)
 ; RV64-NEXT:    ret
 ;
@@ -551,7 +549,7 @@ define zeroext i1 @uaddo.i64.constant_one(i64 %v1, i64* %res) {
 ; RV64ZBA-LABEL: uaddo.i64.constant_one:
 ; RV64ZBA:       # %bb.0: # %entry
 ; RV64ZBA-NEXT:    addi a2, a0, 1
-; RV64ZBA-NEXT:    sltu a0, a2, a0
+; RV64ZBA-NEXT:    seqz a0, a2
 ; RV64ZBA-NEXT:    sd a2, 0(a1)
 ; RV64ZBA-NEXT:    ret
 entry:


        


More information about the llvm-commits mailing list