[llvm] [AMDGPU] Remove setcc by using add/sub carryout (PR #155255)

via llvm-commits llvm-commits@lists.llvm.org
Thu Oct 2 08:11:52 PDT 2025


https://github.com/LU-JOHN updated https://github.com/llvm/llvm-project/pull/155255

From 7777081d3347cb7972f7a0c87ccb403b7b76c6c9 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu@amd.com>
Date: Fri, 19 Sep 2025 15:17:42 -0500
Subject: [PATCH 01/18] Add test ensuring S_UADDO_PSEUDO selection

Signed-off-by: John Lu <John.Lu@amd.com>
---
 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll | 28 ++++++++++++++++++++++
 1 file changed, 28 insertions(+)
 create mode 100644 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll

diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
new file mode 100644
index 0000000000000..607997ecf6d1e
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
@@ -0,0 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc -mtriple=amdgcn-amd-amdpal -stop-after=amdgpu-isel < %s | FileCheck -check-prefixes=GCN-ISEL %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
+; Ensure that S_UADDO_PSEUDO is selected when carryout user is S_ADD_CO_PSEUDO
+
+; GCN-ISEL-LABEL: name:   s_uaddo_pseudo
+; GCN-ISEL-LABEL: body:
+; GCN-ISEL: S_UADDO_PSEUDO
+; GCN-ISEL: S_ADD_CO_PSEUDO
+
+define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: s_uaddo_pseudo:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_add_i32 s0, s0, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; CHECK-NEXT:    s_addc_u32 s0, 1, 0
+; CHECK-NEXT:    ; return to shader part epilog
+  %pair = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val0, i32 %val1)
+  %carryout = extractvalue {i32, i1} %pair, 1
+  %add_overflow = sext i1 %carryout to i32
+  %cmp_carryout = icmp ult i32 0, %add_overflow
+  %zext_carryout = zext i1 %cmp_carryout to i32
+  %result = add i32 %zext_carryout, 1
+  ret i32 %result
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; GCN-ISEL: {{.*}}
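
These CHECK lines come from the autogeneration script named in the file
header; when later revisions change the expected assembly they can be
regenerated rather than edited by hand. A typical invocation (assuming llc
was built into build/bin) is:

  llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
    llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll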

From 3cc14787bee51e5eebcedc5e90da27943667223e Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu@amd.com>
Date: Mon, 22 Sep 2025 11:02:53 -0500
Subject: [PATCH 02/18] Streamline testcase

Signed-off-by: John Lu <John.Lu@amd.com>
---
 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
index 607997ecf6d1e..2d380e622d028 100644
--- a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
+++ b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
@@ -8,19 +8,17 @@
 ; GCN-ISEL: S_UADDO_PSEUDO
 ; GCN-ISEL: S_ADD_CO_PSEUDO
 
-define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0, i32 inreg %val1) {
+define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0) {
 ; CHECK-LABEL: s_uaddo_pseudo:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_i32 s0, s0, s1
+; CHECK-NEXT:    s_add_i32 s0, s0, 1
 ; CHECK-NEXT:    s_cselect_b64 s[0:1], 1, 0
 ; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
 ; CHECK-NEXT:    s_addc_u32 s0, 1, 0
 ; CHECK-NEXT:    ; return to shader part epilog
-  %pair = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val0, i32 %val1)
-  %carryout = extractvalue {i32, i1} %pair, 1
-  %add_overflow = sext i1 %carryout to i32
-  %cmp_carryout = icmp ult i32 0, %add_overflow
-  %zext_carryout = zext i1 %cmp_carryout to i32
+  %pair = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %val0, i32 1)
+  %carryout = extractvalue { i32, i1 } %pair, 1
+  %zext_carryout = zext i1 %carryout to i32
   %result = add i32 %zext_carryout, 1
   ret i32 %result
 }

From 9a602adb9fdfcbd6a7b87bf61495f2a73aaa387b Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu@amd.com>
Date: Mon, 22 Sep 2025 13:03:41 -0500
Subject: [PATCH 03/18] Update tests

Signed-off-by: John Lu <John.Lu@amd.com>
---
 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
index 2d380e622d028..ebcd84782a3dc 100644
--- a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
+++ b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
@@ -11,7 +11,7 @@
 define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0) {
 ; CHECK-LABEL: s_uaddo_pseudo:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_i32 s0, s0, 1
+; CHECK-NEXT:    s_add_u32 s0, s0, 1
 ; CHECK-NEXT:    s_cselect_b64 s[0:1], 1, 0
 ; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
 ; CHECK-NEXT:    s_addc_u32 s0, 1, 0

From 14930a15a888c830d4325f991a2379a39e066c3c Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu@amd.com>
Date: Wed, 24 Sep 2025 15:21:21 -0500
Subject: [PATCH 04/18] Set all bits for wave mask

Signed-off-by: John Lu <John.Lu@amd.com>
---
 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
index ebcd84782a3dc..134729ead3aa0 100644
--- a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
+++ b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
@@ -12,7 +12,7 @@ define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0) {
 ; CHECK-LABEL: s_uaddo_pseudo:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_add_u32 s0, s0, 1
-; CHECK-NEXT:    s_cselect_b64 s[0:1], 1, 0
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
 ; CHECK-NEXT:    s_addc_u32 s0, 1, 0
 ; CHECK-NEXT:    ; return to shader part epilog

From 731d6ceefa10529b634b35a7e87f51a797b582dd Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu@amd.com>
Date: Thu, 25 Sep 2025 09:15:04 -0500
Subject: [PATCH 05/18] Use same subtarget. Test sub.

Signed-off-by: John Lu <John.Lu@amd.com>
---
 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
index 134729ead3aa0..a828ee0a7883c 100644
--- a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
+++ b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc -mtriple=amdgcn-amd-amdpal -stop-after=amdgpu-isel < %s | FileCheck -check-prefixes=GCN-ISEL %s
+; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -stop-after=amdgpu-isel < %s | FileCheck -check-prefixes=GCN-ISEL %s
 ; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
 ; Ensure that S_UADDO_PSEUDO is selected when carryout user is S_ADD_CO_PSEUDO
 
@@ -22,5 +22,25 @@ define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0) {
   %result = add i32 %zext_carryout, 1
   ret i32 %result
 }
+
+; GCN-ISEL-LABEL: name:   s_usubo_pseudo
+; GCN-ISEL-LABEL: body:
+; GCN-ISEL: S_USUBO_PSEUDO
+; GCN-ISEL: S_SUB_CO_PSEUDO
+
+define amdgpu_ps i32 @s_usubo_pseudo(i32 inreg %val0, i32 inreg %val1) {
+; CHECK-LABEL: s_usubo_pseudo:
+; CHECK:       ; %bb.0:
+; CHECK-NEXT:    s_sub_u32 s0, s0, 1
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT:    s_subb_u32 s0, s1, 0
+; CHECK-NEXT:    ; return to shader part epilog
+  %pair = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %val0, i32 1)
+  %carryout = extractvalue { i32, i1 } %pair, 1
+  %zext_carryout = zext i1 %carryout to i32
+  %result = sub i32 %val1, %zext_carryout
+  ret i32 %result
+}
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GCN-ISEL: {{.*}}

From c82304ec2cc9009d190047309321405686ab28cd Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu@amd.com>
Date: Thu, 25 Sep 2025 09:41:09 -0500
Subject: [PATCH 06/18] Rename testcase

Signed-off-by: John Lu <John.Lu@amd.com>
---
 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll | 46 ----------------------
 1 file changed, 46 deletions(-)
 delete mode 100644 llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll

diff --git a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll b/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
deleted file mode 100644
index a828ee0a7883c..0000000000000
--- a/llvm/test/CodeGen/AMDGPU/s_uaddo_pseudo.ll
+++ /dev/null
@@ -1,46 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -stop-after=amdgpu-isel < %s | FileCheck -check-prefixes=GCN-ISEL %s
-; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck %s
-; Ensure that S_UADDO_PSEUDO is selected when carryout user is S_ADD_CO_PSEUDO
-
-; GCN-ISEL-LABEL: name:   s_uaddo_pseudo
-; GCN-ISEL-LABEL: body:
-; GCN-ISEL: S_UADDO_PSEUDO
-; GCN-ISEL: S_ADD_CO_PSEUDO
-
-define amdgpu_ps i32 @s_uaddo_pseudo(i32 inreg %val0) {
-; CHECK-LABEL: s_uaddo_pseudo:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s0, s0, 1
-; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
-; CHECK-NEXT:    s_cmp_lg_u64 s[0:1], 0
-; CHECK-NEXT:    s_addc_u32 s0, 1, 0
-; CHECK-NEXT:    ; return to shader part epilog
-  %pair = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %val0, i32 1)
-  %carryout = extractvalue { i32, i1 } %pair, 1
-  %zext_carryout = zext i1 %carryout to i32
-  %result = add i32 %zext_carryout, 1
-  ret i32 %result
-}
-
-; GCN-ISEL-LABEL: name:   s_usubo_pseudo
-; GCN-ISEL-LABEL: body:
-; GCN-ISEL: S_USUBO_PSEUDO
-; GCN-ISEL: S_SUB_CO_PSEUDO
-
-define amdgpu_ps i32 @s_usubo_pseudo(i32 inreg %val0, i32 inreg %val1) {
-; CHECK-LABEL: s_usubo_pseudo:
-; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_sub_u32 s0, s0, 1
-; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
-; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
-; CHECK-NEXT:    s_subb_u32 s0, s1, 0
-; CHECK-NEXT:    ; return to shader part epilog
-  %pair = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %val0, i32 1)
-  %carryout = extractvalue { i32, i1 } %pair, 1
-  %zext_carryout = zext i1 %carryout to i32
-  %result = sub i32 %val1, %zext_carryout
-  ret i32 %result
-}
-;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
-; GCN-ISEL: {{.*}}

From b47c28f6eda3a19ea2619205b0bdfbb903aa360f Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu@amd.com>
Date: Mon, 25 Aug 2025 09:53:39 -0500
Subject: [PATCH 07/18] Remove setcc by using add/sub carryout

Signed-off-by: John Lu <John.Lu@amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  56 +++
 llvm/test/CodeGen/AMDGPU/addsub64_carry.ll    |  53 +--
 .../test/CodeGen/AMDGPU/carryout-selection.ll |  80 ++--
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            | 364 +++++++++---------
 llvm/test/CodeGen/AMDGPU/srem64.ll            | 312 ++++++++-------
 llvm/test/CodeGen/AMDGPU/uaddsat.ll           |  47 +--
 llvm/test/CodeGen/AMDGPU/udiv64.ll            | 306 ++++++++-------
 llvm/test/CodeGen/AMDGPU/urem64.ll            | 201 +++++-----
 llvm/test/CodeGen/AMDGPU/usubsat.ll           |  54 +--
 9 files changed, 736 insertions(+), 737 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 16530087444d2..927a4216ff0e4 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16579,6 +16579,62 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
     }
   }
 
+  // Eliminate setcc by using the carryout from the add/sub instruction.
+
+  // X = ADD i64 Y, Z          Xlo = UADDO       i32 Ylo, Zlo
+  // setcc X ult Y     ->      Xhi = UADDO_CARRY i32 Yhi, Zhi
+  // (and similarly for subtraction)
+
+  // X = ADD i64 Y, 1          Xlo = UADDO       i32 Ylo, 1
+  // setcc X eq 0      ->      Xhi = UADDO_CARRY i32 Yhi, 0
+
+  // Don't split a 64-bit add/sub into two 32-bit add/sub instructions for
+  // non-divergent operations. Doing so can leave the lo/hi halves split
+  // between SGPRs and VGPRs, with extra instructions needed to move
+  // operands and/or generate the intermediate carry.
+  if (VT == MVT::i64 && N->isDivergent() &&
+      ((((LHS.getOpcode() == ISD::ADD && CC == ISD::SETULT) ||
+         (LHS.getOpcode() == ISD::SUB && CC == ISD::SETUGT)) &&
+        LHS.getOperand(0) == RHS) ||
+       (LHS.getOpcode() == ISD::ADD && CC == ISD::SETEQ && CRHS &&
+        CRHS->isZero() &&
+        isOneConstant(LHS.getOperand(1))))) {
+    EVT TargetType = MVT::i32;
+    EVT CarryVT = MVT::i1;
+    const SDValue One = DAG.getConstant(1, SL, TargetType);
+    bool IsAdd = LHS.getOpcode() == ISD::ADD;
+
+    SDValue Op0 = LHS.getOperand(0);
+    SDValue Op1 = LHS.getOperand(1);
+
+    SDValue Op0Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op0);
+    SDValue Op1Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op1);
+
+    SDValue Op0Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op0, One);
+    SDValue Op1Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op1, One);
+
+    SDValue NodeLo =
+        DAG.getNode(IsAdd ? ISD::UADDO : ISD::USUBO, SL,
+                    DAG.getVTList(TargetType, CarryVT), {Op0Lo, Op1Lo});
+
+    SDValue CarryInHi = SDValue(NodeLo.getNode(), 1);
+    SDValue NodeHi = DAG.getNode(IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY,
+                                 SL, DAG.getVTList(TargetType, CarryVT),
+                                 {Op0Hi, Op1Hi, CarryInHi});
+
+    SDValue ResultLo = SDValue(NodeLo.getNode(), 0);
+    SDValue ResultHi = SDValue(NodeHi.getNode(), 0);
+
+    EVT ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
+    SDValue JoinedResult =
+        DAG.getBuildVector(ConcatType, SL, {ResultLo, ResultHi});
+
+    SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, JoinedResult);
+    SDValue Overflow = SDValue(NodeHi.getNode(), 1);
+    DCI.CombineTo(LHS.getNode(), Result);
+    return Overflow;
+  }
+
   if (VT != MVT::f32 && VT != MVT::f64 &&
       (!Subtarget->has16BitInsts() || VT != MVT::f16))
     return SDValue();
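
For reference, here is a minimal IR sketch of the two patterns this combine
targets (function and value names are illustrative only; the values are made
divergent via workitem.id.x so the N->isDivergent() check holds):

; Carry of a divergent 64-bit add recovered via setcc ult:
; X = ADD i64 Y, Z; setcc X ult Y.
define amdgpu_kernel void @carry_of_add(ptr addrspace(1) %out, i64 %z) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %y = zext i32 %tid to i64
  %x = add i64 %y, %z
  %carry = icmp ult i64 %x, %y       ; folds into the UADDO_CARRY carryout
  %ext = zext i1 %carry to i32
  store i32 %ext, ptr addrspace(1) %out
  ret void
}

; Increment-and-test-for-zero: X = ADD i64 Y, 1; setcc X eq 0. The compare
; is true exactly when the add carried out of bit 63.
define amdgpu_kernel void @inc_wraps_to_zero(ptr addrspace(1) %out, i64 %z) {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %t = zext i32 %tid to i64
  %y = or i64 %t, %z                 ; divergent 64-bit value
  %x = add i64 %y, 1
  %wrapped = icmp eq i64 %x, 0
  %ext = zext i1 %wrapped to i32
  store i32 %ext, ptr addrspace(1) %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()

In both cases the combine rewrites the compare as the carryout of the split
32-bit add, which is what the v_add_co_u32/v_addc_co_u32 sequences in the
test updates below reflect.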
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index d326966e849e2..397835972e4d4 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -17,12 +17,9 @@ define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
 ; CHECK-LABEL: v_add64_32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v5, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
-; CHECK-NEXT:    v_mov_b32_e32 v0, v5
+; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
 ; CHECK-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
-; CHECK-NEXT:    v_mov_b32_e32 v1, v6
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -38,16 +35,14 @@ define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_add_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_add_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT:    v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_addc_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -63,16 +58,14 @@ define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_sub_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_sub_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_subb_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -87,10 +80,9 @@ define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_uadd_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -109,7 +101,6 @@ define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, 1, v0
 ; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
@@ -147,10 +138,9 @@ define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_p1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v0
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, 1, v0
+; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -167,10 +157,9 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_n1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, 1, v0
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, -1, v0
+; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, -1, v1, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 51652a09863e0..d08e3f1480616 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -841,7 +841,7 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vuaddo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_ADD_U64_PSEUDO
+; GCN-ISEL: V_ADD_CO_U32_e64
 
 define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vuaddo64:
@@ -854,9 +854,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_add_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[0:1]
 ; CISI-NEXT:    s_mov_b32 s5, s1
+; CISI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -876,7 +875,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_add_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -894,7 +892,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -909,8 +906,7 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_add_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -923,9 +919,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -938,9 +933,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_add_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -955,10 +949,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -969,16 +962,17 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
@@ -1821,7 +1815,7 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vusubo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_SUB_U64_PSEUDO
+; GCN-ISEL: V_SUBB_U32_e64
 
 define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vusubo64:
@@ -1834,9 +1828,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
 ; CISI-NEXT:    s_mov_b32 s5, s1
+; CISI-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -1856,7 +1849,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_sub_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -1874,7 +1866,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -1889,8 +1880,7 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_sub_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -1903,9 +1893,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -1918,9 +1907,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_sub_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -1935,10 +1923,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -1949,16 +1936,17 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_sub_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_sub_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index ddec6af0af69e..522c3c151efc3 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -389,25 +389,25 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_sdiv:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v12, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v13
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v13
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v11, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v11
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v11
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v6
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v7
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
@@ -416,70 +416,69 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v14, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v13
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v11
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v6, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[6:7], v14
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v4, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v4, v9
+; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v16, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v17, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v15, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v11, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v13, v12
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v4, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
@@ -1474,17 +1473,17 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1492,69 +1491,68 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 58, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 24, %x
   ret i64 %result
@@ -1667,17 +1665,17 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1686,70 +1684,69 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 32768, %x
   ret i64 %result
@@ -1769,20 +1766,20 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v8, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e64 v0, s[4:5], 32, v0
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v5
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v0, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v0, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v6
 ; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v8
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1792,15 +1789,15 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 63, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
@@ -1813,40 +1810,39 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s10, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v6, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v4, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v7, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v3, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v2, v0
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v8
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v9
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, 32768
   ret i64 %result
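
The loop rewrites above all rest on one unsigned identity: c + 1 produces a 64-bit carry-out exactly when c + 1 wraps to 0. The udiv-do-while counters count up toward zero, so the old v_cmp_eq_u64 against 0 duplicated the carry already computed by the v_add_i32/v_addc_u32 pair; the new code ORs vcc straight into the loop-exit mask instead. A minimal IR sketch of the identity (the function name is illustrative, not from the patch):

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

; carry-out of c + 1 is set iff c == -1, i.e. iff c + 1 == 0
define i1 @inc_wraps_to_zero(i64 %c) {
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %c, i64 1)
  %carry = extractvalue { i64, i1 } %pair, 1
  ret i1 %carry
}
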
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 2d95875cad882..6307e28a6e598 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -373,12 +373,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_srem:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v14
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v14, vcc
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v4
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
@@ -386,12 +386,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -400,7 +400,7 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v14
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
@@ -408,54 +408,53 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v6, v11
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v16, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v17, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -469,10 +468,10 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v15
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v15, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, %y
   ret i64 %result
@@ -1649,9 +1648,9 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1665,53 +1664,52 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 58, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1840,9 +1838,9 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1857,54 +1855,53 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1939,20 +1936,20 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1963,14 +1960,14 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
@@ -1983,31 +1980,30 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -2016,10 +2012,10 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 15
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, 32768
   ret i64 %result
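
The udiv-bb1 guards change by the complementary identity: the removed v_cmp_ne_u64 tested that the incremented count pair was nonzero, and (x + 1 != 0) is simply the inverse of the carry-out of x + 1, hence the new s_xor_b64 of vcc with -1 feeding s_and_saveexec_b64. A sketch of that inversion (illustrative name):

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

; !carry is equivalent to (x + 1 != 0)
define i1 @guard_not_wrapped(i64 %x) {
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %x, i64 1)
  %carry = extractvalue { i64, i1 } %pair, 1
  %ne = xor i1 %carry, true
  ret i1 %ne
}
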
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
index 923017400adb1..7f89581d00fde 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -693,52 +693,47 @@ define i64 @v_uaddsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_uaddsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_uaddsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_uaddsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_uaddsat_i64:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_uaddsat_i64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
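
In v_uaddsat_i64 the dropped compare is the overflow test itself: for unsigned addition, sum < lhs holds exactly when the add carries out, so on every target the v_cndmask pair can consume the vcc written by the carry add, and the v_cmp_lt_u64 on the 64-bit sum disappears. An IR sketch of the equivalent shape (illustrative name):

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

; saturating add: select on the carry-out instead of comparing the sum;
; i64 -1 is the all-ones saturated value the v_cndmask materializes
define i64 @uaddsat_sketch(i64 %a, i64 %b) {
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %sum = extractvalue { i64, i1 } %pair, 0
  %ovf = extractvalue { i64, i1 } %pair, 1
  %res = select i1 %ovf, i64 -1, i64 %sum
  ret i64 %res
}
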
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index dc11e81476a7e..16fffd4420714 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -313,19 +313,19 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v14, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v15, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[6:7], v14, v15
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[6:7]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[8:9]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
@@ -333,55 +333,54 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v8
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v10
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, v6, v9
+; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v10, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v11, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v6, v0
@@ -1098,12 +1097,12 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffd0, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[4:5]
@@ -1113,55 +1112,54 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v8
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1188,13 +1186,13 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 48, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1202,15 +1200,15 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
@@ -1223,31 +1221,30 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB10_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB10_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB10_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1388,13 +1385,13 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 59, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1402,51 +1399,50 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffc4, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 24, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
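
The subtraction half of the change is quieter in these files: once the compares stop occupying vcc, the v_sub/v_subb/v_subbrev borrow chains in the loop bodies migrate from a scratch SGPR pair (s[4:5]) to vcc. The underlying borrow identity, for reference (illustrative name):

declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64)

; borrow-out of a - b (the i1 overflow flag) is set iff a < b unsigned
define i1 @ult_via_borrow(i64 %a, i64 %b) {
  %pair = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %borrow = extractvalue { i64, i1 } %pair, 1
  ret i1 %borrow
}
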
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index dc25caadb99a9..8567b817f5a45 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -362,12 +362,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -383,54 +383,53 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, v6, v11
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v15, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -1241,8 +1240,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1257,54 +1256,53 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB8_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1337,8 +1335,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1352,14 +1350,14 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
@@ -1372,31 +1370,30 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
diff --git a/llvm/test/CodeGen/AMDGPU/usubsat.ll b/llvm/test/CodeGen/AMDGPU/usubsat.ll
index 90491a07289a0..3ddb2f02c48fe 100644
--- a/llvm/test/CodeGen/AMDGPU/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubsat.ll
@@ -730,52 +730,38 @@ define i64 @v_usubsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_usubsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_usubsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_usubsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_usubsat_i64:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_usubsat_i64:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_usubsat_i64:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10PLUS-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc_lo
+; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
 }
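
The dropped v_cmp_gt_u64 is redundant here because, for usub.sat, the
borrow that v_subb leaves in vcc is exactly the lhs < rhs condition the
clamp selects on. A minimal standalone demonstration of that identity
(plain C++, not the in-tree lowering):

  #include <cassert>
  #include <cstdint>

  // For unsigned subtraction, borrow-out == (lhs < rhs), which is also
  // the condition usub.sat clamps on, so no separate compare of the
  // wrapped difference against the original operand is needed.
  static uint64_t usub_sat(uint64_t lhs, uint64_t rhs) {
    uint64_t diff = lhs - rhs; // wraps exactly when a borrow occurs
    bool borrow = lhs < rhs;   // the flag the hardware carry-out encodes
    return borrow ? 0 : diff;
  }

  int main() {
    assert(usub_sat(5, 3) == 2);
    assert(usub_sat(3, 5) == 0); // would wrap; clamped to 0 instead
    return 0;
  }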

>From 19a0a2e504eee9e5ab1686c8a0ca3bc6e0a9af20 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 28 Aug 2025 10:09:37 -0500
Subject: [PATCH 08/18] Get hi-half cleanly

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 927a4216ff0e4..d161b37a4d200 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16601,7 +16601,6 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
         dyn_cast<ConstantSDNode>(LHS.getOperand(1))->isOne()))) {
     EVT TargetType = MVT::i32;
     EVT CarryVT = MVT::i1;
-    const SDValue One = DAG.getConstant(1, SL, TargetType);
     bool IsAdd = LHS.getOpcode() == ISD::ADD;
 
     SDValue Op0 = LHS.getOperand(0);
@@ -16610,8 +16609,8 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
     SDValue Op0Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op0);
     SDValue Op1Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op1);
 
-    SDValue Op0Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op0, One);
-    SDValue Op1Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, SL, TargetType, Op1, One);
+    SDValue Op0Hi = getHiHalf64(Op0, DAG);
+    SDValue Op1Hi = getHiHalf64(Op1, DAG);
 
     SDValue NodeLo =
         DAG.getNode(IsAdd ? ISD::UADDO : ISD::USUBO, SL,
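
For reference, the getHiHalf64 helper substituted above is defined
elsewhere in SIISelLowering.cpp and is not part of this hunk. Assuming
it follows the usual AMDGPU pattern for splitting a 64-bit value, it is
roughly equivalent to this hypothetical reconstruction:

  // Sketch, not the in-tree definition: reinterpret the i64 as v2i32
  // and extract element 1, i.e. the high 32 bits.
  static SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) {
    SDLoc SL(Op);
    assert(Op.getValueType() == MVT::i64 && "expected a 64-bit value");
    SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec,
                       DAG.getConstant(1, SL, MVT::i32));
  }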

>From c182a9a14aadc0de63b20e445e52e3e4cd56ddd4 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 28 Aug 2025 14:43:08 -0500
Subject: [PATCH 09/18] Use sd_match.  Make comment clearer.

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d161b37a4d200..7ab1125b32bea 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16581,24 +16581,24 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
 
   // Eliminate setcc by using carryout from add/sub instruction
 
-  // X = ADD i64 Y, Z          Xlo = UADDO       i32 Ylo, Zlo
-  // setcc X ult Y     ->      XHi = UADDO_CARRY i32 Yhi, Zhi
+  // LHS = ADD i64 RHS, Z          LHSlo = UADDO       i32 RHSlo, Zlo
+  // setcc LHS ult RHS     ->      LHSHi = UADDO_CARRY i32 RHShi, Zhi
   // similarly for subtraction
 
-  // X = ADD i64 Y, 1          Xlo = UADDO       i32 Ylo, 1
-  // setcc X eq 0      ->      XHi = UADDO_CARRY i32 Yhi, 0
+  // LHS = ADD i64 Y, 1            LHSlo = UADDO       i32 Ylo, 1
+  // setcc LHS eq 0        ->      LHSHi = UADDO_CARRY i32 Yhi, 0
 
   // Don't split a 64-bit add/sub into two 32-bit add/sub instructions for
   // non-divergent operations.  This can result in lo/hi 32-bit operations
   // being done in SGPR and VGPR with additional operations being needed
   // to move operands and/or generate the intermediate carry.
   if (VT == MVT::i64 && N->isDivergent() &&
-      ((((LHS.getOpcode() == ISD::ADD && CC == ISD::SETULT) ||
-         (LHS.getOpcode() == ISD::SUB && CC == ISD::SETUGT)) &&
-        LHS.getOperand(0) == RHS) ||
-       (LHS.getOpcode() == ISD::ADD && CC == ISD::SETEQ && CRHS &&
-        CRHS->isZero() && dyn_cast<ConstantSDNode>(LHS.getOperand(1)) &&
-        dyn_cast<ConstantSDNode>(LHS.getOperand(1))->isOne()))) {
+      ((CC == ISD::SETULT &&
+        sd_match(LHS, m_Add(m_Specific(RHS), m_Value()))) ||
+       (CC == ISD::SETUGT &&
+        sd_match(LHS, m_Sub(m_Specific(RHS), m_Value()))) ||
+       (CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
+        sd_match(LHS, m_Add(m_Value(), m_One()))))) {
     EVT TargetType = MVT::i32;
     EVT CarryVT = MVT::i1;
     bool IsAdd = LHS.getOpcode() == ISD::ADD;
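
Pulled out of context, the rewritten guard recognizes three setcc
shapes whose result is recoverable from an add/sub carry-out. A sketch
using the same SDPatternMatch helpers the hunk relies on (the helper
name isCarryoutSetCC is illustrative, not in-tree):

  using namespace llvm::SDPatternMatch;

  static bool isCarryoutSetCC(SDValue LHS, SDValue RHS,
                              ConstantSDNode *CRHS, ISD::CondCode CC) {
    // X = add Y, Z;  setcc ult X, Y  -> unsigned-add overflow.
    if (CC == ISD::SETULT &&
        sd_match(LHS, m_Add(m_Specific(RHS), m_Value())))
      return true;
    // X = sub Y, Z;  setcc ugt X, Y  -> unsigned-sub borrow.
    if (CC == ISD::SETUGT &&
        sd_match(LHS, m_Sub(m_Specific(RHS), m_Value())))
      return true;
    // X = add Y, 1;  setcc eq X, 0   -> carry out of Y + 1.
    return CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
           sd_match(LHS, m_Add(m_Value(), m_One()));
  }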

>From a3b46db9f438764f0948a65fff801aba59b0dcb2 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 8 Sep 2025 06:54:06 -0500
Subject: [PATCH 10/18] Use getValue

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 7ab1125b32bea..87f6150af1ab1 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16616,20 +16616,20 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
         DAG.getNode(IsAdd ? ISD::UADDO : ISD::USUBO, SL,
                     DAG.getVTList(TargetType, CarryVT), {Op0Lo, Op1Lo});
 
-    SDValue CarryInHi = SDValue(NodeLo.getNode(), 1);
+    SDValue CarryInHi = NodeLo.getValue(1);
     SDValue NodeHi = DAG.getNode(IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY,
                                  SL, DAG.getVTList(TargetType, CarryVT),
                                  {Op0Hi, Op1Hi, CarryInHi});
 
-    SDValue ResultLo = SDValue(NodeLo.getNode(), 0);
-    SDValue ResultHi = SDValue(NodeHi.getNode(), 0);
+    SDValue ResultLo = NodeLo.getValue(0);
+    SDValue ResultHi = NodeHi.getValue(0);
 
     EVT ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
     SDValue JoinedResult =
         DAG.getBuildVector(ConcatType, SL, {ResultLo, ResultHi});
 
     SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, JoinedResult);
-    SDValue Overflow = SDValue(NodeHi.getNode(), 1);
+    SDValue Overflow = NodeHi.getValue(1);
     DCI.CombineTo(LHS.getNode(), Result);
     return Overflow;
   }
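
The change is purely cosmetic: SDValue::getValue(N) names the N-th
result of the same node, replacing the raw SDValue(Node, N)
constructor. In context (a sketch; DAG, SL, and the lo-half operands
are the locals visible in the hunk, and Sum/Carry are illustrative
names):

  // NodeLo carries two results: (i32 sum, i1 carry-out).
  SDValue NodeLo =
      DAG.getNode(ISD::UADDO, SL, DAG.getVTList(MVT::i32, MVT::i1),
                  {Op0Lo, Op1Lo});
  SDValue Sum   = NodeLo.getValue(0); // result #0, the 32-bit sum
  SDValue Carry = NodeLo.getValue(1); // result #1; equivalent to
                                      // SDValue(NodeLo.getNode(), 1)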

>From 54ab1cc8fce199b305d78089389ac78c77a77ad7 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 8 Sep 2025 07:26:20 -0500
Subject: [PATCH 11/18] Use explicit type

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 87f6150af1ab1..e0ea01ec183f8 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -16624,9 +16624,8 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
     SDValue ResultLo = NodeLo.getValue(0);
     SDValue ResultHi = NodeHi.getValue(0);
 
-    EVT ConcatType = EVT::getVectorVT(*DAG.getContext(), TargetType, 2);
     SDValue JoinedResult =
-        DAG.getBuildVector(ConcatType, SL, {ResultLo, ResultHi});
+        DAG.getBuildVector(MVT::v2i32, SL, {ResultLo, ResultHi});
 
     SDValue Result = DAG.getNode(ISD::BITCAST, SL, VT, JoinedResult);
     SDValue Overflow = NodeHi.getValue(1);

>From f98e651af2db672f0857e918685b6b257925c82e Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 8 Sep 2025 18:12:19 -0500
Subject: [PATCH 12/18] Update new tests

Signed-off-by: John Lu <John.Lu at amd.com>
---
 .../test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll | 98 ++++++++-----------
 .../CodeGen/AMDGPU/a-v-global-atomicrmw.ll    | 24 ++---
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            | 14 +--
 llvm/test/CodeGen/AMDGPU/srem64.ll            | 14 +--
 llvm/test/CodeGen/AMDGPU/udiv64.ll            | 14 +--
 llvm/test/CodeGen/AMDGPU/urem64.ll            | 14 +--
 6 files changed, 78 insertions(+), 100 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index d053425afbb6d..3f5c1d41cc7a9 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -8760,9 +8760,8 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8781,20 +8780,19 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB113_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v4, -1, v4, vcc
-; GFX90A-NEXT:    buffer_load_dword v0, v4, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v4, vcc
+; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
+; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v6
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen offset:4
-; GFX90A-NEXT:    buffer_store_dword v2, v4, s[0:3], 0 offen
+; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v7, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8828,10 +8826,9 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8857,11 +8854,11 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 v4, v[2:3], off
 ; GFX950-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    s_or_b64 exec, exec, s[0:1]
@@ -8901,9 +8898,8 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -8919,18 +8915,17 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB114_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v6, -1, v0, vcc
-; GFX90A-NEXT:    buffer_load_dword v4, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v0, vcc
+; GFX90A-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v5, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v4, v2
+; GFX90A-NEXT:    v_sub_co_u32_e32 v1, vcc, v4, v2
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v5, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_subb_co_u32_e32 v2, vcc, v5, v3, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB114_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8963,10 +8958,9 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -8989,7 +8983,6 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -17065,9 +17058,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17086,20 +17078,19 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX90A-NEXT:    s_cselect_b32 s4, s4, -1
-; GFX90A-NEXT:    v_mov_b32_e32 v6, s4
-; GFX90A-NEXT:    buffer_load_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_mov_b32_e32 v0, s4
+; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
+; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
-; GFX90A-NEXT:    buffer_store_dword v2, v6, s[0:3], 0 offen
+; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    ;;#ASMSTART
 ; GFX90A-NEXT:    ; use a[0:1]
@@ -17132,10 +17123,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17159,11 +17149,11 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 off, v[2:3], s0
 ; GFX950-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    ;;#ASMSTART
@@ -17202,9 +17192,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17227,7 +17216,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen
@@ -17263,10 +17251,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17287,7 +17274,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
index c98fff96d7b8a..34a4899123749 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
@@ -5804,9 +5804,8 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5839,10 +5838,9 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5880,9 +5878,8 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -5911,10 +5908,9 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -11573,9 +11569,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11609,10 +11604,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11651,9 +11645,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -11683,10 +11676,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 522c3c151efc3..46ed8da3a548a 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -1800,14 +1800,14 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[4:5], v[4:5], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 6307e28a6e598..6173aae52bae7 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -1970,14 +1970,14 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 16fffd4420714..47066a3c48bf3 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -1211,14 +1211,14 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 8567b817f5a45..76788fbdd9394 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -1360,14 +1360,14 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1

>From 119502245986cac251fa443aaddbedf67a2e03ba Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 10 Sep 2025 17:40:19 -0500
Subject: [PATCH 13/18] Calc IsVALU correctly during UADDO/USUBO selection

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp |   3 +-
 .../test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll |  98 +++--
 .../CodeGen/AMDGPU/a-v-global-atomicrmw.ll    |  24 +-
 llvm/test/CodeGen/AMDGPU/addsub64_carry.ll    |  53 ++-
 .../test/CodeGen/AMDGPU/carryout-selection.ll |  80 ++--
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            | 378 +++++++++---------
 llvm/test/CodeGen/AMDGPU/srem64.ll            | 326 +++++++--------
 llvm/test/CodeGen/AMDGPU/uaddsat.ll           |  47 ++-
 llvm/test/CodeGen/AMDGPU/udiv64.ll            | 320 +++++++--------
 llvm/test/CodeGen/AMDGPU/urem64.ll            | 215 +++++-----
 llvm/test/CodeGen/AMDGPU/usubsat.ll           |  54 ++-
 11 files changed, 838 insertions(+), 760 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 2192a72bb27b7..f95d9edfd1ffc 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1111,8 +1111,7 @@ void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
         {N->getOperand(0), N->getOperand(1),
          CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
   } else {
-    unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
-                                                : AMDGPU::S_USUBO_PSEUDO;
+    unsigned Opc = IsAdd ? AMDGPU::S_UADDO_PSEUDO : AMDGPU::S_USUBO_PSEUDO;
 
     CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                          {N->getOperand(0), N->getOperand(1)});
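
For orientation, SelectUADDO_USUBO chooses between VALU and scalar
pseudos, and the fix above simply reuses the IsAdd flag computed once
at the top of the function instead of re-deriving it from the opcode.
A condensed sketch of that selection shape (assuming IsVALU is derived
from the node's divergence earlier in the function; the VALU opcode
names are assumptions, not taken from this hunk):

  // Sketch only: divergent carry-outs go to the VALU form (with the
  // clamp-bit operand shown in the hunk), uniform ones to the scalar
  // pseudos the tests match.
  bool IsAdd = N->getOpcode() == ISD::UADDO;
  if (IsVALU) {
    unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64
                         : AMDGPU::V_SUB_CO_U32_e64;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                         {N->getOperand(0), N->getOperand(1),
                          CurDAG->getTargetConstant(0, {}, MVT::i1)});
  } else {
    unsigned Opc = IsAdd ? AMDGPU::S_UADDO_PSEUDO : AMDGPU::S_USUBO_PSEUDO;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                         {N->getOperand(0), N->getOperand(1)});
  }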
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index 3f5c1d41cc7a9..d053425afbb6d 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -8760,8 +8760,9 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8780,19 +8781,20 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB113_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v4, vcc
-; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v4, -1, v4, vcc
+; GFX90A-NEXT:    buffer_load_dword v0, v4, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v6
+; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v7, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
+; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    buffer_store_dword v2, v4, s[0:3], 0 offen
 ; GFX90A-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8826,9 +8828,10 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8854,11 +8857,11 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    s_nop 0
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 v4, v[2:3], off
 ; GFX950-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    s_or_b64 exec, exec, s[0:1]
@@ -8898,8 +8901,9 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -8915,17 +8919,18 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB114_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v0, vcc
-; GFX90A-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v5, v0, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v6, -1, v0, vcc
+; GFX90A-NEXT:    buffer_load_dword v4, v6, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v1, vcc, v4, v2
+; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v4, v2
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v2, vcc, v5, v3, vcc
+; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v5, v3, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB114_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8958,9 +8963,10 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -8983,6 +8989,7 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -17058,8 +17065,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17078,19 +17086,20 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX90A-NEXT:    s_cselect_b32 s4, s4, -1
-; GFX90A-NEXT:    v_mov_b32_e32 v0, s4
-; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_mov_b32_e32 v6, s4
+; GFX90A-NEXT:    buffer_load_dword v0, v6, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v4
+; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
+; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    buffer_store_dword v2, v6, s[0:3], 0 offen
 ; GFX90A-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    ;;#ASMSTART
 ; GFX90A-NEXT:    ; use a[0:1]
@@ -17123,9 +17132,10 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17149,11 +17159,11 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    s_nop 0
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 off, v[2:3], s0
 ; GFX950-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    ;;#ASMSTART
@@ -17192,8 +17202,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17216,6 +17227,7 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen
@@ -17251,9 +17263,10 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17274,6 +17287,7 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
index 34a4899123749..c98fff96d7b8a 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
@@ -5804,8 +5804,9 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5838,9 +5839,10 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5878,8 +5880,9 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -5908,9 +5911,10 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -11569,8 +11573,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11604,9 +11609,10 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11645,8 +11651,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -11676,9 +11683,10 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
+; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
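
A note on the usub_sat hunks above: in each case the clamp-to-zero select no longer reuses the borrow that v_sub_co_u32/v_subb_co_u32 leave in vcc, but recomputes it with v_cmp_gt_u64 on the wrapped difference. The two forms are interchangeable because, for an unsigned a - b, a borrow occurred exactly when the wrapped result is greater than the minuend. A minimal IR sketch of that identity (illustrative only; the function name is invented, not from the patch):

define i64 @usub_sat_expanded(i64 %a, i64 %b) {
  %d = sub i64 %a, %b                    ; wrapped difference
  %borrow = icmp ugt i64 %d, %a          ; borrow occurred iff result > minuend
  %r = select i1 %borrow, i64 0, i64 %d  ; clamp to zero on borrow
  ret i64 %r
}
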
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index 397835972e4d4..d326966e849e2 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -17,9 +17,12 @@ define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
 ; CHECK-LABEL: v_add64_32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_add_co_u32_e32 v5, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
+; CHECK-NEXT:    v_mov_b32_e32 v0, v5
 ; CHECK-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v6
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -35,14 +38,16 @@ define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v6, vcc, v2, v6
-; CHECK-NEXT:    v_add_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_addc_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_add_co_u32_e32 v4, vcc, v0, v4
+; CHECK-NEXT:    v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -58,14 +63,16 @@ define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_sub_co_u32_e32 v6, vcc, v2, v6
-; CHECK-NEXT:    v_sub_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_subb_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v4
+; CHECK-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -80,9 +87,10 @@ define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_uadd_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -101,6 +109,7 @@ define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, 1, v0
 ; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
@@ -138,9 +147,10 @@ define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_p1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, 1, v0
-; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v0
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -157,9 +167,10 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_n1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, -1, v0
-; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, -1, v1, vcc
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
+; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, 1, v0
+; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
+; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
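
The addsub64_carry.ll output above uses the dual identity for addition: the carryout of a 64-bit add is recovered by an unsigned compare of the wrapped sum against one addend (v_cmp_lt_u64), mirroring the icmp ult already written in v_add64_32's IR body. A small sketch of the equivalence the new selection relies on (illustrative; the function name is invented):

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

define i1 @uaddo_as_compare(i64 %a, i64 %b) {
  ; Overflow bit taken straight from the intrinsic...
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %ov = extractvalue { i64, i1 } %pair, 1
  ; ...and the same bit recovered from the wrapped sum.
  %sum = add i64 %a, %b
  %cmp = icmp ult i64 %sum, %a
  ; The two always agree, so this returns true for all inputs.
  %same = icmp eq i1 %ov, %cmp
  ret i1 %same
}
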
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index d08e3f1480616..51652a09863e0 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -841,7 +841,7 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vuaddo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_ADD_CO_U32_e64
+; GCN-ISEL: V_ADD_U64_PSEUDO
 
 define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vuaddo64:
@@ -854,8 +854,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_add_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    s_mov_b32 s5, s1
 ; CISI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; CISI-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[0:1]
+; CISI-NEXT:    s_mov_b32 s5, s1
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -875,6 +876,7 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_add_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -892,6 +894,7 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -906,7 +909,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_add_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1010-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -919,8 +923,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX1030W32-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -933,8 +938,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_add_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
+; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -949,9 +955,10 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -962,17 +969,16 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX1250-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
+; GFX1250-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[2:3]
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
@@ -1815,7 +1821,7 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vusubo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_SUBB_U32_e64
+; GCN-ISEL: V_SUB_U64_PSEUDO
 
 define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vusubo64:
@@ -1828,8 +1834,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    s_mov_b32 s5, s1
 ; CISI-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; CISI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
+; CISI-NEXT:    s_mov_b32 s5, s1
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -1849,6 +1856,7 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_sub_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
+; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -1866,6 +1874,7 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -1880,7 +1889,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_sub_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1010-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -1893,8 +1903,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX1030W32-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -1907,8 +1918,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_sub_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
+; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -1923,9 +1935,10 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -1936,17 +1949,16 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
+; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
-; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX1250-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_sub_nc_u64_e32 v[2:3], s[6:7], v[0:1]
+; GFX1250-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[2:3]
+; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
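
In carryout-selection.ll the GCN-ISEL checks now expect the whole 64-bit operation (V_ADD_U64_PSEUDO / V_SUB_U64_PSEUDO) rather than a 32-bit carry chain, with the carry bit reconstructed afterwards by a 64-bit compare against the scalar operand (v_cmp_gt_u64 for adds, v_cmp_lt_u64 for subs). A trimmed, hypothetical variant of the pattern the vusubo64 checks cover (not the test itself):

declare { i64, i1 } @llvm.usub.with.overflow.i64(i64, i64)

define void @vusubo64_sketch(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) {
  %pair = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
  %diff = extractvalue { i64, i1 } %pair, 0
  %c = extractvalue { i64, i1 } %pair, 1
  store i64 %diff, ptr addrspace(1) %out       ; 64-bit result, selected whole
  store i1 %c, ptr addrspace(1) %carryout      ; carry recomputed by compare
  ret void
}
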
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 46ed8da3a548a..ddec6af0af69e 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -389,25 +389,25 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_sdiv:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v11, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v10, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v11
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v11
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v13
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v13
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v6
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v7
-; GCN-IR-NEXT:    v_min_u32_e32 v9, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v8, v9
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
@@ -416,69 +416,70 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v11
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, v13
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v6, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[6:7], v14
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v4, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v4, v9
-; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v16, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v17, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v11, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v13, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v15, v14
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v4, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
@@ -1473,17 +1474,17 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1491,68 +1492,69 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 58, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 24, %x
   ret i64 %result
@@ -1665,17 +1667,17 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1684,69 +1686,70 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 47, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 32768, %x
   ret i64 %result
@@ -1766,20 +1769,20 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v8
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v8
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v8, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e64 v0, s[4:5], 32, v0
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v5
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v0, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v6
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v0, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1789,60 +1792,61 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], v0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], v0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v6
-; GCN-IR-NEXT:    v_lshr_b64 v[4:5], v[4:5], v7
-; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s12, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s10, v4
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v6, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v4, v6
-; GCN-IR-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v7, v1
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v3, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v2, v0
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v8
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v9
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, 32768
   ret i64 %result
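
In the GCN-IR long-division loops of sdiv64.ll the exit condition changes form but not meaning: the counter starts at minus the trip count, so the carryout of incrementing it is set exactly on the step where it wraps to zero, which is what the new v_cmp_eq_u64 against 0 tests explicitly. A one-line sketch of that equivalence (illustrative only):

define i1 @loop_exit_equiv(i64 %n) {
  %n1 = add i64 %n, 1          ; carryout of this add fires only when %n == -1
  %done = icmp eq i64 %n1, 0   ; i.e. exactly when the counter wraps to zero
  ret i1 %done
}
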
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 6173aae52bae7..2d95875cad882 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -373,12 +373,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_srem:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v14
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v14, vcc
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v4
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
@@ -386,12 +386,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -400,7 +400,7 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, v14
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
@@ -408,53 +408,54 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v6, v11
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
-; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v16, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v17, v11, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -468,10 +469,10 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v15
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v15, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, %y
   ret i64 %result
@@ -1648,9 +1649,9 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1664,52 +1665,53 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 58, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1838,9 +1840,9 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1855,53 +1857,54 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1936,20 +1939,20 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1960,50 +1963,51 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
-; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], 0, -1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -2012,10 +2016,10 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 15
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, 32768
   ret i64 %result
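
The GCN-IR loops updated above (and in the udiv64/urem64 diffs below) are the
shift-and-subtract expansion of 64-bit division: each trip shifts one numerator
bit into a running remainder and derives a conditional-subtract mask from a
compare. A minimal C++ model of that dataflow, illustrative only; the real
expansion also narrows the trip count from the leading-zero counts, and the new
exit test is the v_cmp_eq_u64 on the 64-bit counter pair rather than the
carry-out of the increment:

  #include <cstdint>

  // Bit-at-a-time unsigned division, Den != 0 assumed.
  static uint64_t udiv64_bitloop(uint64_t Num, uint64_t Den, uint64_t &Rem) {
    uint64_t Quot = 0;
    Rem = 0;
    for (int I = 63; I >= 0; --I) {
      Rem = (Rem << 1) | ((Num >> I) & 1);   // v_lshl_b64 + v_or_b32
      // All-ones iff Rem >= Den; the hardware takes the sign of
      // (Den - 1) - Rem via v_sub/v_subb + v_ashrrev_i32.
      uint64_t Mask = -(uint64_t)(Rem >= Den);
      Rem -= Den & Mask;                     // conditional subtract
      Quot = (Quot << 1) | (Mask & 1);       // next quotient bit
    }
    return Quot;
  }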
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
index 7f89581d00fde..923017400adb1 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -693,47 +693,52 @@ define i64 @v_uaddsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_uaddsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v0, v2
+; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_uaddsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_uaddsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_uaddsat_i64:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_uaddsat_i64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
+; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
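
The updated v_uaddsat_i64 sequences keep the plain 64-bit add and recover the
overflow condition with an unsigned compare of the sum against an addend, since
sum < lhs exactly when the add wrapped; the carry left in vcc is no longer the
selector. Equivalent logic, as a sketch:

  #include <cstdint>

  // Branch-free 64-bit saturating add in the shape of the new sequence.
  static uint64_t uaddsat64(uint64_t Lhs, uint64_t Rhs) {
    uint64_t Sum = Lhs + Rhs;        // v_add_co_u32 + v_add(c)_co_ci_u32
    return Sum < Lhs ? ~0ull : Sum;  // v_cmp_lt_u64 + two v_cndmask_b32
  }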
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 47066a3c48bf3..dc11e81476a7e 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -313,19 +313,19 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v14, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v8, v9
+; GCN-IR-NEXT:    v_min_u32_e32 v15, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[6:7], v14, v15
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[8:9]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
@@ -333,54 +333,55 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v8
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v8
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v10
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, v6, v9
-; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v10, v0
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v11, v1, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v7
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v6, v0
@@ -1097,12 +1098,12 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v8
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[4:5]
@@ -1112,54 +1113,55 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1186,13 +1188,13 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1200,51 +1202,52 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffcf, v6
-; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v0
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
-; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB10_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB10_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1385,13 +1388,13 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 59, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1399,50 +1402,51 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffc4, v6
-; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v0
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
-; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 24, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 76788fbdd9394..dc25caadb99a9 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -362,12 +362,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -383,53 +383,54 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, v6, v11
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
-; GCN-IR-NEXT:    v_addc_u32_e64 v15, s[8:9], -1, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -1240,8 +1241,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v10
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1256,53 +1257,54 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
-; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB8_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1335,8 +1337,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1350,50 +1352,51 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
-; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
-; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
diff --git a/llvm/test/CodeGen/AMDGPU/usubsat.ll b/llvm/test/CodeGen/AMDGPU/usubsat.ll
index 3ddb2f02c48fe..90491a07289a0 100644
--- a/llvm/test/CodeGen/AMDGPU/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubsat.ll
@@ -730,38 +730,52 @@ define i64 @v_usubsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_usubsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v0, v2
+; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_usubsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
-; GFX8-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, v0, v2
+; GFX8-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_usubsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
+; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10PLUS-LABEL: v_usubsat_i64:
-; GFX10PLUS:       ; %bb.0:
-; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10PLUS-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
-; GFX10PLUS-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
-; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc_lo
-; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc_lo
-; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
+; GFX10-LABEL: v_usubsat_i64:
+; GFX10:       ; %bb.0:
+; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
+; GFX10-NEXT:    v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
+; GFX10-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: v_usubsat_i64:
+; GFX11:       ; %bb.0:
+; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
+; GFX11-NEXT:    v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
+; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
+; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
 }
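
The usubsat change mirrors the uaddsat one: the borrow is recovered as
diff > lhs, which holds exactly when the subtract wrapped, and zero is selected
on underflow. (The former GFX10PLUS block splits into GFX10 and GFX11 because
GFX11 now writes the intermediate borrow to null.) Sketch:

  #include <cstdint>

  // Branch-free 64-bit saturating subtract in the shape of the new sequence.
  static uint64_t usubsat64(uint64_t Lhs, uint64_t Rhs) {
    uint64_t Diff = Lhs - Rhs;      // v_sub_co_u32 + v_sub(b)_co_ci_u32
    return Diff > Lhs ? 0 : Diff;   // v_cmp_gt_u64 + two v_cndmask_b32
  }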

>From 7c30e3827ce9d9eb265aae246f31e9902eb74821 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Mon, 15 Sep 2025 10:01:17 -0500
Subject: [PATCH 14/18] Do not generate S_CMP if add/sub carryout is available

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 107 +++++++++++++++++++---
 1 file changed, 93 insertions(+), 14 deletions(-)
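
In outline, the lowering change below skips the S_CMP_LG that rematerializes
SCC from the carry register whenever SCC provably still holds that carry: the
carry-in must be defined by an S_CSELECT_B32/B64 selecting a nonzero immediate
over zero, that select must immediately follow the SCC-defining add/sub, and no
intervening instruction may redefine SCC. A simplified stand-alone sketch of
the predicate, with hypothetical stand-in types; the committed code walks
MachineBasicBlock reverse iterators instead (see the diff):

  #include <cstdint>
  #include <vector>

  // Hypothetical, simplified stand-in for MachineInstr.
  struct Inst {
    enum Kind { AddSub, AddcSubb, CSelect, Other } K;
    bool DefsSCC;         // does this instruction (re)define SCC?
    int64_t TrueImm = 0;  // for CSelect: value taken when SCC is set
    int64_t FalseImm = 0; // for CSelect: value taken when SCC is clear
  };

  // True if SCC still holds the carry produced at SelIdx when it is
  // consumed at UseIdx, so the S_CMP_LG would be redundant.
  static bool carryStillInSCC(const std::vector<Inst> &BB, size_t SelIdx,
                              size_t UseIdx) {
    const Inst &Sel = BB[SelIdx];
    if (Sel.K != Inst::CSelect || Sel.TrueImm == 0 || Sel.FalseImm != 0)
      return false;
    if (SelIdx == 0 || (BB[SelIdx - 1].K != Inst::AddSub &&
                        BB[SelIdx - 1].K != Inst::AddcSubb))
      return false;
    for (size_t I = SelIdx + 1; I < UseIdx; ++I)
      if (BB[I].DefsSCC) // any other SCC def invalidates the carry
        return false;
    return true;
  }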

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index e0ea01ec183f8..58066ee9b96e5 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6081,9 +6081,9 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     MachineOperand &Src0 = MI.getOperand(2);
     MachineOperand &Src1 = MI.getOperand(3);
     MachineOperand &Src2 = MI.getOperand(4);
-    unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
-                       ? AMDGPU::S_ADDC_U32
-                       : AMDGPU::S_SUBB_U32;
+
+    bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO);
+
     if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) {
       Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
       BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0)
@@ -6103,6 +6103,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
       Src2.setReg(RegOp2);
     }
 
+<<<<<<< HEAD
     if (ST.isWave64()) {
       if (ST.hasScalarCompareEq64()) {
         BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
@@ -6140,6 +6141,89 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
 
     unsigned SelOpc =
         ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
+=======
+    const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
+    unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
+    assert(WaveSize == 64 || WaveSize == 32);
+
+    unsigned SelOpc =
+        (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
+    unsigned AddcSubbOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
+    unsigned AddSubOpc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
+    //  Lowering for:
+    //
+    //    S_UADDO_PSEUDO|S_ADD_CO_PSEUDO
+    //    <no SCC def code>
+    //    S_ADD_CO_PSEUDO
+    //
+    //  produces:
+    //
+    //    S_ADD_I32|S_ADDC_U32                  ; lowered from S_UADDO_PSEUDO
+    //    SREG = S_CSELECT_B32|64 [1,-1], 0     ; lowered from S_UADDO_PSEUDO
+    //    <no SCC def code>
+    //    S_CMP_LG_U32|U64 SREG, 0              ; lowered from S_ADD_CO_PSEUDO
+    //    S_ADDC_U32                            ; lowered from S_ADD_CO_PSEUDO
+    //
+    //  At this point, before generating the S_CMP, check if it is redundant.
+    //  If so, do not recalculate it.  Subsequent optimizations will also delete the
+    //  dead S_CSELECT*.
+
+    bool RecalculateSCC{true};
+    MachineInstr *Def = MRI.getVRegDef(Src2.getReg());
+    if (Def && Def->getParent() == BB && Def->getOpcode() == SelOpc &&
+        Def->getOperand(1).isImm() && Def->getOperand(1).getImm() != 0 &&
+        Def->getOperand(2).isImm() && Def->getOperand(2).getImm() == 0) {
+
+      auto I1 = std::next(MachineBasicBlock::reverse_iterator(Def));
+      if (I1 != BB->rend() &&
+          (I1->getOpcode() == AddSubOpc || I1->getOpcode() == AddcSubbOpc)) {
+        RecalculateSCC = false;
+        // Ensure there are no intervening definitions of SCC.
+        for (auto I2 = std::next(MachineBasicBlock::reverse_iterator(MI));
+             I2 != I1; I2++) {
+          if (I2->definesRegister(AMDGPU::SCC, TRI)) {
+            RecalculateSCC = true;
+            break;
+          }
+        }
+      }
+    }
+
+    if (RecalculateSCC) {
+      if (WaveSize == 64) {
+        if (ST.hasScalarCompareEq64()) {
+          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
+              .addReg(Src2.getReg())
+              .addImm(0);
+        } else {
+          const TargetRegisterClass *SubRC =
+              TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
+          MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
+              MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
+          MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
+              MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
+          Register Src2_32 =
+              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
+              .add(Src2Sub0)
+              .add(Src2Sub1);
+
+          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
+              .addReg(Src2_32, RegState::Kill)
+              .addImm(0);
+        }
+      } else {
+        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
+            .addReg(Src2.getReg())
+            .addImm(0);
+      }
+    }
+
+    BuildMI(*BB, MII, DL, TII->get(AddcSubbOpc), Dest.getReg())
+        .add(Src0)
+        .add(Src1);
+>>>>>>> 0cb43743ea30 (Do not generate S_CMP if add/sub carryout is available)
 
     BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
         .addImm(-1)
@@ -16588,17 +16672,12 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
   // LHS = ADD i64 Y, 1            LHSlo = UADDO       i32 Ylo, 1
   // setcc LHS eq 0        ->      LHSHi = UADDO_CARRY i32 Yhi, 0
 
-  // Don't split a 64-bit add/sub into two 32-bit add/sub instructions for
-  // non-divergent operations.  This can result in lo/hi 32-bit operations
-  // being done in SGPR and VGPR with additional operations being needed
-  // to move operands and/or generate the intermediate carry.
-  if (VT == MVT::i64 && N->isDivergent() &&
-      ((CC == ISD::SETULT &&
-        sd_match(LHS, m_Add(m_Specific(RHS), m_Value()))) ||
-       (CC == ISD::SETUGT &&
-        sd_match(LHS, m_Sub(m_Specific(RHS), m_Value()))) ||
-       (CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
-        sd_match(LHS, m_Add(m_Value(), m_One()))))) {
+  if (VT == MVT::i64 && ((CC == ISD::SETULT &&
+                          sd_match(LHS, m_Add(m_Specific(RHS), m_Value()))) ||
+                         (CC == ISD::SETUGT &&
+                          sd_match(LHS, m_Sub(m_Specific(RHS), m_Value()))) ||
+                         (CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
+                          sd_match(LHS, m_Add(m_Value(), m_One()))))) {
     EVT TargetType = MVT::i32;
     EVT CarryVT = MVT::i1;
     bool IsAdd = LHS.getOpcode() == ISD::ADD;

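As a quick illustration of the relaxed combine above (a hypothetical reduced
example, not from the patch): with the divergence restriction dropped, the
uniform "setcc (add i64 Y, 1) eq 0" form below can also be split into a
32-bit UADDO feeding a UADDO_CARRY, avoiding the 64-bit compare.

  define amdgpu_ps i32 @add1_eq_zero(i64 inreg %y) {
    %sum = add i64 %y, 1              ; LHS = ADD i64 Y, 1
    %ovf = icmp eq i64 %sum, 0        ; setcc LHS eq 0
    %r   = zext i1 %ovf to i32
    ret i32 %r
  }
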
>From d88ffc5362385f180c3d943cced8ea97710d78f4 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Tue, 16 Sep 2025 09:26:42 -0500
Subject: [PATCH 15/18] Limit search.  Make code clearer.

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 71 ++++++-----------------
 1 file changed, 18 insertions(+), 53 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 58066ee9b96e5..6049eb61832bb 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6103,50 +6103,11 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
       Src2.setReg(RegOp2);
     }
 
-<<<<<<< HEAD
-    if (ST.isWave64()) {
-      if (ST.hasScalarCompareEq64()) {
-        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
-            .addReg(Src2.getReg())
-            .addImm(0);
-      } else {
-        const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
-        const TargetRegisterClass *SubRC =
-            TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
-        MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
-            MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
-        MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
-            MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
-        Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
-        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
-            .add(Src2Sub0)
-            .add(Src2Sub1);
-
-        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
-            .addReg(Src2_32, RegState::Kill)
-            .addImm(0);
-      }
-    } else {
-      BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
-          .addReg(Src2.getReg())
-          .addImm(0);
-    }
-
-    // clang-format off
-    BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg())
-        .add(Src0)
-        .add(Src1);
-    // clang-format on
-
-    unsigned SelOpc =
-        ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
-=======
     const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
     unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
     assert(WaveSize == 64 || WaveSize == 32);
 
-    unsigned SelOpc =
+    unsigned SelectOpc =
         (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
     unsigned AddcSubbOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
     unsigned AddSubOpc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
@@ -6169,22 +6130,27 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     //  dead S_CSELECT*.
 
     bool RecalculateSCC{true};
-    MachineInstr *Def = MRI.getVRegDef(Src2.getReg());
-    if (Def && Def->getParent() == BB && Def->getOpcode() == SelOpc &&
-        Def->getOperand(1).isImm() && Def->getOperand(1).getImm() != 0 &&
-        Def->getOperand(2).isImm() && Def->getOperand(2).getImm() == 0) {
-
-      auto I1 = std::next(MachineBasicBlock::reverse_iterator(Def));
+    MachineInstr *SelectDef = MRI.getVRegDef(Src2.getReg());
+    if (SelectDef && SelectDef->getParent() == BB &&
+        SelectDef->getOpcode() == SelectOpc &&
+        SelectDef->getOperand(1).isImm() &&
+        SelectDef->getOperand(1).getImm() != 0 &&
+        SelectDef->getOperand(2).isImm() &&
+        SelectDef->getOperand(2).getImm() == 0) {
+      auto I1 = std::next(MachineBasicBlock::reverse_iterator(SelectDef));
       if (I1 != BB->rend() &&
           (I1->getOpcode() == AddSubOpc || I1->getOpcode() == AddcSubbOpc)) {
-        RecalculateSCC = false;
-        // Ensure there are no intervening definitions of SCC.
+        // Ensure there are no intervening definitions of SCC between ADDs/SUBs.
+        const unsigned SearchLimit = 6;
+        unsigned Count = 0;
         for (auto I2 = std::next(MachineBasicBlock::reverse_iterator(MI));
-             I2 != I1; I2++) {
-          if (I2->definesRegister(AMDGPU::SCC, TRI)) {
-            RecalculateSCC = true;
+             Count < SearchLimit; I2++, Count++) {
+          if (I2 == I1) {
+            RecalculateSCC = false;
             break;
           }
+          if (I2->definesRegister(AMDGPU::SCC, TRI))
+            break;
         }
       }
     }
@@ -6223,9 +6189,8 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     BuildMI(*BB, MII, DL, TII->get(AddcSubbOpc), Dest.getReg())
         .add(Src0)
         .add(Src1);
->>>>>>> 0cb43743ea30 (Do not generate S_CMP if add/sub carryout is available)
 
-    BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
+    BuildMI(*BB, MII, DL, TII->get(SelectOpc), CarryDest.getReg())
         .addImm(-1)
         .addImm(0);
 

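For context, the kind of scalar carry chain this bounded search targets looks
roughly like the following (hypothetical IR, assuming uniform inputs select
the S_UADDO_PSEUDO/S_ADD_CO_PSEUDO pair described in the comment): the
follow-up add consumes the carry produced just before it, so the S_CMP
between the two scalar adds is the candidate for removal.

  define amdgpu_ps i64 @carry_reuse(i64 inreg %a, i64 inreg %b) {
    %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
    %sum  = extractvalue { i64, i1 } %pair, 0
    %c    = extractvalue { i64, i1 } %pair, 1
    %cx   = zext i1 %c to i64
    %res  = add i64 %sum, %cx         ; reuses the carry right after the add
    ret i64 %res
  }
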
>From 054964c7197510a2054054181e40d13898da9ab5 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Wed, 24 Sep 2025 15:04:07 -0500
Subject: [PATCH 16/18] Defer deletion of unnecessary S_CMP to a separate PR

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 89 ++++++-----------------
 1 file changed, 21 insertions(+), 68 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 6049eb61832bb..b1989f005fe73 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6110,81 +6110,34 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     unsigned SelectOpc =
         (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
     unsigned AddcSubbOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
-    unsigned AddSubOpc = IsAdd ? AMDGPU::S_ADD_I32 : AMDGPU::S_SUB_I32;
-    //  Lowering for:
-    //
-    //    S_UADDO_PSEUDO|S_ADD_CO_PSEUDO
-    //    <no SCC def code>
-    //    S_ADD_CO_PSEUDO
-    //
-    //  produces:
-    //
-    //    S_ADD_I32|S_ADDC_U32                  ; lowered from S_UADDO_PSEUDO
-    //    SREG = S_CSELECT_B32|64 [1,-1], 0     ; lowered from S_UADDO_PSEUDO
-    //    <no SCC def code>
-    //    S_CMP_LG_U32|U64 SREG, 0              ; lowered from S_ADD_CO_PSEUDO
-    //    S_ADDC_U32                            ; lowered from S_ADD_CO_PSEUDO
-    //
-    //  At this point, before generating the S_CMP, check if it is redundant.
-    //  If so, do not recalculate it.  Subsequent optimizations will also delete the
-    //  dead S_CSELECT*.
-
-    bool RecalculateSCC{true};
-    MachineInstr *SelectDef = MRI.getVRegDef(Src2.getReg());
-    if (SelectDef && SelectDef->getParent() == BB &&
-        SelectDef->getOpcode() == SelectOpc &&
-        SelectDef->getOperand(1).isImm() &&
-        SelectDef->getOperand(1).getImm() != 0 &&
-        SelectDef->getOperand(2).isImm() &&
-        SelectDef->getOperand(2).getImm() == 0) {
-      auto I1 = std::next(MachineBasicBlock::reverse_iterator(SelectDef));
-      if (I1 != BB->rend() &&
-          (I1->getOpcode() == AddSubOpc || I1->getOpcode() == AddcSubbOpc)) {
-        // Ensure there are no intervening definitions of SCC between ADDs/SUBs.
-        const unsigned SearchLimit = 6;
-        unsigned Count = 0;
-        for (auto I2 = std::next(MachineBasicBlock::reverse_iterator(MI));
-             Count < SearchLimit; I2++, Count++) {
-          if (I2 == I1) {
-            RecalculateSCC = false;
-            break;
-          }
-          if (I2->definesRegister(AMDGPU::SCC, TRI))
-            break;
-        }
-      }
-    }
 
-    if (RecalculateSCC) {
-      if (WaveSize == 64) {
-        if (ST.hasScalarCompareEq64()) {
-          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
-              .addReg(Src2.getReg())
-              .addImm(0);
-        } else {
-          const TargetRegisterClass *SubRC =
-              TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
-          MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
-              MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
-          MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
-              MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
-          Register Src2_32 =
-              MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
-
-          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
-              .add(Src2Sub0)
-              .add(Src2Sub1);
+    if (WaveSize == 64) {
+      if (ST.hasScalarCompareEq64()) {
+        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
+            .addReg(Src2.getReg())
+            .addImm(0);
+      } else {
+        const TargetRegisterClass *SubRC =
+            TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
+        MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
+            MII, MRI, Src2, Src2RC, AMDGPU::sub0, SubRC);
+        MachineOperand Src2Sub1 = TII->buildExtractSubRegOrImm(
+            MII, MRI, Src2, Src2RC, AMDGPU::sub1, SubRC);
+        Register Src2_32 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
+
+        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_OR_B32), Src2_32)
+            .add(Src2Sub0)
+            .add(Src2Sub1);
 
-          BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
-              .addReg(Src2_32, RegState::Kill)
-              .addImm(0);
-        }
+        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
+            .addReg(Src2_32, RegState::Kill)
+            .addImm(0);
+      }
       } else {
         BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
             .addReg(Src2.getReg())
             .addImm(0);
       }
-    }
 
     BuildMI(*BB, MII, DL, TII->get(AddcSubbOpc), Dest.getReg())
         .add(Src0)

>From 5be1306b417e64cc22867838d23b7605a92f3277 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Fri, 26 Sep 2025 09:59:27 -0500
Subject: [PATCH 17/18] Update tests

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |  37 +-
 .../test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll |  98 ++--
 .../CodeGen/AMDGPU/a-v-global-atomicrmw.ll    |  24 +-
 llvm/test/CodeGen/AMDGPU/addsub64_carry.ll    | 192 ++++---
 .../test/CodeGen/AMDGPU/carryout-selection.ll | 322 ++++++------
 llvm/test/CodeGen/AMDGPU/sdiv64.ll            | 470 ++++++++---------
 llvm/test/CodeGen/AMDGPU/srem64.ll            | 488 +++++++++---------
 llvm/test/CodeGen/AMDGPU/uaddo.ll             | 201 ++++----
 llvm/test/CodeGen/AMDGPU/uaddsat.ll           |  47 +-
 llvm/test/CodeGen/AMDGPU/udiv64.ll            | 472 +++++++++--------
 llvm/test/CodeGen/AMDGPU/urem64.ll            | 379 +++++++-------
 llvm/test/CodeGen/AMDGPU/usubo.ll             | 201 ++++----
 llvm/test/CodeGen/AMDGPU/usubsat.ll           |  54 +-
 13 files changed, 1501 insertions(+), 1484 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index b1989f005fe73..4c60bc95e2c06 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6081,9 +6081,6 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
     MachineOperand &Src0 = MI.getOperand(2);
     MachineOperand &Src1 = MI.getOperand(3);
     MachineOperand &Src2 = MI.getOperand(4);
-
-    bool IsAdd = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO);
-
     if (Src0.isReg() && TRI->isVectorRegister(MRI, Src0.getReg())) {
       Register RegOp0 = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
       BuildMI(*BB, MII, DL, TII->get(AMDGPU::V_READFIRSTLANE_B32), RegOp0)
@@ -6103,20 +6100,13 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
       Src2.setReg(RegOp2);
     }
 
-    const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
-    unsigned WaveSize = TRI->getRegSizeInBits(*Src2RC);
-    assert(WaveSize == 64 || WaveSize == 32);
-
-    unsigned SelectOpc =
-        (WaveSize == 64) ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
-    unsigned AddcSubbOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
-
-    if (WaveSize == 64) {
+    if (ST.isWave64()) {
       if (ST.hasScalarCompareEq64()) {
         BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U64))
             .addReg(Src2.getReg())
             .addImm(0);
       } else {
+        const TargetRegisterClass *Src2RC = MRI.getRegClass(Src2.getReg());
         const TargetRegisterClass *SubRC =
             TRI->getSubRegisterClass(Src2RC, AMDGPU::sub0);
         MachineOperand Src2Sub0 = TII->buildExtractSubRegOrImm(
@@ -6133,17 +6123,22 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
             .addReg(Src2_32, RegState::Kill)
             .addImm(0);
       }
-      } else {
-        BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
-            .addReg(Src2.getReg())
-            .addImm(0);
-      }
+    } else {
+      BuildMI(*BB, MII, DL, TII->get(AMDGPU::S_CMP_LG_U32))
+          .addReg(Src2.getReg())
+          .addImm(0);
+    }
 
-    BuildMI(*BB, MII, DL, TII->get(AddcSubbOpc), Dest.getReg())
-        .add(Src0)
-        .add(Src1);
+    unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
+                       ? AMDGPU::S_ADDC_U32
+                       : AMDGPU::S_SUBB_U32;
+
+    BuildMI(*BB, MII, DL, TII->get(Opc), Dest.getReg()).add(Src0).add(Src1);
+
+    unsigned SelOpc =
+        ST.isWave64() ? AMDGPU::S_CSELECT_B64 : AMDGPU::S_CSELECT_B32;
 
-    BuildMI(*BB, MII, DL, TII->get(SelectOpc), CarryDest.getReg())
+    BuildMI(*BB, MII, DL, TII->get(SelOpc), CarryDest.getReg())
         .addImm(-1)
         .addImm(0);
 
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
index d053425afbb6d..3f5c1d41cc7a9 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-flat-atomicrmw.ll
@@ -8760,9 +8760,8 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8781,20 +8780,19 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB113_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v4, -1, v4, vcc
-; GFX90A-NEXT:    buffer_load_dword v0, v4, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v1, v4, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v4, vcc
+; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
+; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v6
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen offset:4
-; GFX90A-NEXT:    buffer_store_dword v2, v4, s[0:3], 0 offen
+; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v7, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8828,10 +8826,9 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -8857,11 +8854,11 @@ define void @flat_atomic_usub_sat_i64_ret_a_a(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 v4, v[2:3], off
 ; GFX950-NEXT:  .LBB113_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    s_or_b64 exec, exec, s[0:1]
@@ -8901,9 +8898,8 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -8919,18 +8915,17 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX90A-NEXT:    s_cbranch_execz .LBB114_6
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GFX90A-NEXT:    v_cndmask_b32_e32 v6, -1, v0, vcc
-; GFX90A-NEXT:    buffer_load_dword v4, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v5, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e32 v0, -1, v0, vcc
+; GFX90A-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v5, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v4, v2
+; GFX90A-NEXT:    v_sub_co_u32_e32 v1, vcc, v4, v2
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v5, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_subb_co_u32_e32 v2, vcc, v5, v3, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_store_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB114_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GFX90A-NEXT:    ;;#ASMSTART
@@ -8963,10 +8958,9 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -8989,7 +8983,6 @@ define void @flat_atomic_usub_sat_i64_ret_av_av(ptr %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
@@ -17065,9 +17058,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17086,20 +17078,19 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:  ; %bb.5: ; %atomicrmw.private
 ; GFX90A-NEXT:    s_cmp_lg_u64 s[4:5], 0
 ; GFX90A-NEXT:    s_cselect_b32 s4, s4, -1
-; GFX90A-NEXT:    v_mov_b32_e32 v6, s4
-; GFX90A-NEXT:    buffer_load_dword v0, v6, s[0:3], 0 offen
-; GFX90A-NEXT:    buffer_load_dword v1, v6, s[0:3], 0 offen offset:4
+; GFX90A-NEXT:    v_mov_b32_e32 v0, s4
+; GFX90A-NEXT:    buffer_load_dword v1, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(1)
-; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
+; GFX90A-NEXT:    v_sub_co_u32_e32 v3, vcc, v1, v4
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v3, 0, vcc
-; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v1
-; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
-; GFX90A-NEXT:    buffer_store_dword v0, v6, s[0:3], 0 offen offset:4
-; GFX90A-NEXT:    buffer_store_dword v2, v6, s[0:3], 0 offen
+; GFX90A-NEXT:    v_subb_co_u32_e32 v4, vcc, v2, v5, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a0, v1
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX90A-NEXT:    v_accvgpr_write_b32 a1, v2
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX90A-NEXT:    buffer_store_dword v3, v0, s[0:3], 0 offen
+; GFX90A-NEXT:    buffer_store_dword v1, v0, s[0:3], 0 offen offset:4
 ; GFX90A-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX90A-NEXT:    ;;#ASMSTART
 ; GFX90A-NEXT:    ; use a[0:1]
@@ -17132,10 +17123,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[0:1], v[6:7], v[0:3] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -17159,11 +17149,11 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_a_a(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
 ; GFX950-NEXT:    v_accvgpr_write_b32 a0, v0
-; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
+; GFX950-NEXT:    s_nop 0
 ; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_accvgpr_write_b32 a1, v1
 ; GFX950-NEXT:    scratch_store_dwordx2 off, v[2:3], s0
 ; GFX950-NEXT:  .LBB221_6: ; %atomicrmw.phi
 ; GFX950-NEXT:    ;;#ASMSTART
@@ -17202,9 +17192,8 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17227,7 +17216,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    buffer_store_dword v0, v4, s[0:3], 0 offen
@@ -17263,10 +17251,9 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    flat_atomic_cmpswap_x2 v[2:3], v[4:5], v[6:9] sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -17287,7 +17274,6 @@ define void @flat_atomic_usub_sat_i64_saddr_ret_av_av(ptr inreg %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
diff --git a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
index c98fff96d7b8a..34a4899123749 100644
--- a/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
+++ b/llvm/test/CodeGen/AMDGPU/a-v-global-atomicrmw.ll
@@ -5804,9 +5804,8 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5839,10 +5838,9 @@ define void @global_atomic_usub_sat_i64_ret_a_a(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v4, v6
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v5, v7, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[4:5]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v3, v3, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v[0:1], v[2:5], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
@@ -5880,9 +5878,8 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX90A-NEXT:    v_pk_mov_b32 v[6:7], v[4:5], v[4:5] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -5911,10 +5908,9 @@ define void @global_atomic_usub_sat_i64_ret_av_av(ptr addrspace(1) %ptr) #0 {
 ; GFX950-NEXT:    v_sub_co_u32_e32 v4, vcc, v6, v2
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v5, vcc, v7, v3, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[6:7]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v4, v4, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v5, v5, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[4:5], v[0:1], v[4:7], off offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[4:5], v[6:7]
@@ -11573,9 +11569,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11609,10 +11604,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_a_a(ptr addrspace(1) inreg %pt
 ; GFX950-NEXT:    v_sub_co_u32_e32 v0, vcc, v2, v4
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v1, vcc, v3, v5, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[0:1], v[2:3]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
@@ -11651,9 +11645,8 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX90A-NEXT:    v_pk_mov_b32 v[8:9], v[2:3], v[2:3] op_sel:[0,1]
 ; GFX90A-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX90A-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX90A-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
-; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX90A-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX90A-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[16:17] offset:80 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
@@ -11683,10 +11676,9 @@ define void @global_atomic_usub_sat_i64_saddr_ret_av_av(ptr addrspace(1) inreg %
 ; GFX950-NEXT:    v_sub_co_u32_e32 v2, vcc, v8, v0
 ; GFX950-NEXT:    s_nop 1
 ; GFX950-NEXT:    v_subb_co_u32_e32 v3, vcc, v9, v1, vcc
-; GFX950-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[8:9]
 ; GFX950-NEXT:    s_nop 1
-; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    v_cndmask_b32_e64 v6, v2, 0, vcc
+; GFX950-NEXT:    v_cndmask_b32_e64 v7, v3, 0, vcc
 ; GFX950-NEXT:    global_atomic_cmpswap_x2 v[2:3], v4, v[6:9], s[0:1] offset:80 sc0
 ; GFX950-NEXT:    s_waitcnt vmcnt(0)
 ; GFX950-NEXT:    v_cmp_eq_u64_e32 vcc, v[2:3], v[8:9]
diff --git a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
index d326966e849e2..b72eba8cdb519 100644
--- a/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
+++ b/llvm/test/CodeGen/AMDGPU/addsub64_carry.ll
@@ -17,12 +17,9 @@ define %struct.uint96 @v_add64_32(i64 %val64A, i64 %val64B, i32 %val32) {
 ; CHECK-LABEL: v_add64_32:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v5, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v6, vcc, v1, v3, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[5:6], v[0:1]
-; CHECK-NEXT:    v_mov_b32_e32 v0, v5
+; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
 ; CHECK-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v4, vcc
-; CHECK-NEXT:    v_mov_b32_e32 v1, v6
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -38,16 +35,14 @@ define <2 x i64> @v_uadd_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_add_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_add_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT:    v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_addc_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.uadd.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -63,16 +58,14 @@ define <2 x i64> @v_usub_v2i64(<2 x i64> %val0, <2 x i64> %val1, ptr %ptrval) {
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_sub_co_u32_e32 v6, vcc, v2, v6
+; CHECK-NEXT:    v_sub_co_u32_e64 v4, s[4:5], v0, v4
 ; CHECK-NEXT:    v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
-; CHECK-NEXT:    v_sub_co_u32_e32 v4, vcc, v0, v4
-; CHECK-NEXT:    v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[4:5], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[6:7], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v1, v0
+; CHECK-NEXT:    v_subb_co_u32_e64 v5, s[4:5], v1, v5, s[4:5]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v3, v2
+; CHECK-NEXT:    flat_store_dwordx4 v[8:9], v[4:7]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    s_setpc_b64 s[30:31]
   %pair = call {<2 x i64>, <2 x i1>} @llvm.usub.with.overflow.v2i64(<2 x i64> %val0, <2 x i64> %val1)
@@ -87,10 +80,9 @@ define i64 @v_uadd_i64(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_uadd_i64:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -109,7 +101,6 @@ define i64 @v_uadd_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    v_add_co_u32_e32 v0, vcc, 1, v0
 ; CHECK-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
@@ -147,10 +138,9 @@ define i64 @v_usub_p1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_p1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, -1, v0
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, -1, v1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, 1, v0
+; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -167,10 +157,9 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 ; CHECK-LABEL: v_usub_n1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; CHECK-NEXT:    v_add_co_u32_e32 v2, vcc, 1, v0
-; CHECK-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; CHECK-NEXT:    v_subrev_co_u32_e32 v0, vcc, -1, v0
+; CHECK-NEXT:    v_subbrev_co_u32_e32 v1, vcc, -1, v1, vcc
+; CHECK-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -190,15 +179,13 @@ define i64 @v_usub_n1(i64 %val0, i64 %val1, ptr %ptrval) {
 define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B, i32 inreg %val32) {
 ; CHECK-LABEL: s_add64_32:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s6, s0, s2
-; CHECK-NEXT:    v_mov_b32_e32 v0, s0
-; CHECK-NEXT:    s_addc_u32 s7, s1, s3
-; CHECK-NEXT:    v_mov_b32_e32 v1, s1
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CHECK-NEXT:    s_mov_b32 s0, s6
-; CHECK-NEXT:    s_cmp_lg_u64 vcc, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s2
+; CHECK-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT:    s_addc_u32 s1, s1, s3
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT:    s_addc_u32 s2, s4, 0
-; CHECK-NEXT:    s_mov_b32 s1, s7
 ; CHECK-NEXT:    ; return to shader part epilog
   %sum64 = add i64 %val64A, %val64B
   %obit = icmp ult i64 %sum64, %val64A
@@ -212,24 +199,24 @@ define amdgpu_ps %struct.uint96 @s_add64_32(i64 inreg %val64A, i64 inreg %val64B
 define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_v2i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s6, s2, s6
-; CHECK-NEXT:    v_mov_b32_e32 v9, s3
-; CHECK-NEXT:    s_addc_u32 s7, s3, s7
-; CHECK-NEXT:    v_mov_b32_e32 v8, s2
-; CHECK-NEXT:    s_add_u32 s4, s0, s4
-; CHECK-NEXT:    v_mov_b32_e32 v7, s1
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT:    s_addc_u32 s5, s1, s5
-; CHECK-NEXT:    v_mov_b32_e32 v6, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT:    v_readfirstlane_b32 s2, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT:    v_readfirstlane_b32 s0, v6
-; CHECK-NEXT:    v_mov_b32_e32 v2, s4
-; CHECK-NEXT:    v_mov_b32_e32 v3, s5
-; CHECK-NEXT:    v_mov_b32_e32 v4, s6
-; CHECK-NEXT:    v_mov_b32_e32 v5, s7
+; CHECK-NEXT:    s_add_u32 s10, s2, s6
+; CHECK-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT:    s_addc_u32 s8, s3, s7
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_add_u32 s0, s0, s4
+; CHECK-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT:    s_addc_u32 s1, s1, s5
+; CHECK-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v7
+; CHECK-NEXT:    v_readfirstlane_b32 s2, v6
+; CHECK-NEXT:    v_mov_b32_e32 v4, s10
+; CHECK-NEXT:    v_mov_b32_e32 v5, s8
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_mov_b32 s3, s2
 ; CHECK-NEXT:    flat_store_dwordx4 v[0:1], v[2:5]
@@ -246,24 +233,24 @@ define amdgpu_ps <2 x i64> @s_uadd_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_v2i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_sub_u32 s6, s2, s6
-; CHECK-NEXT:    v_mov_b32_e32 v9, s3
-; CHECK-NEXT:    s_subb_u32 s7, s3, s7
-; CHECK-NEXT:    v_mov_b32_e32 v8, s2
-; CHECK-NEXT:    s_sub_u32 s4, s0, s4
-; CHECK-NEXT:    v_mov_b32_e32 v7, s1
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[8:9]
-; CHECK-NEXT:    s_subb_u32 s5, s1, s5
-; CHECK-NEXT:    v_mov_b32_e32 v6, s0
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[6:7]
-; CHECK-NEXT:    v_readfirstlane_b32 s2, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
-; CHECK-NEXT:    v_readfirstlane_b32 s0, v6
-; CHECK-NEXT:    v_mov_b32_e32 v2, s4
-; CHECK-NEXT:    v_mov_b32_e32 v3, s5
-; CHECK-NEXT:    v_mov_b32_e32 v4, s6
-; CHECK-NEXT:    v_mov_b32_e32 v5, s7
+; CHECK-NEXT:    s_sub_u32 s10, s2, s6
+; CHECK-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; CHECK-NEXT:    s_subb_u32 s8, s3, s7
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_sub_u32 s0, s0, s4
+; CHECK-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[6:7], 0
+; CHECK-NEXT:    s_subb_u32 s1, s1, s5
+; CHECK-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT:    v_readfirstlane_b32 s0, v7
+; CHECK-NEXT:    v_readfirstlane_b32 s2, v6
+; CHECK-NEXT:    v_mov_b32_e32 v4, s10
+; CHECK-NEXT:    v_mov_b32_e32 v5, s8
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_mov_b32 s3, s2
 ; CHECK-NEXT:    flat_store_dwordx4 v[0:1], v[2:5]
@@ -280,15 +267,15 @@ define amdgpu_ps <2 x i64> @s_usub_v2i64(<2 x i64> inreg %val0, <2 x i64> inreg
 define amdgpu_ps i64 @s_uadd_i64(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_i64:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s2, s0, s2
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
-; CHECK-NEXT:    s_addc_u32 s3, s1, s3
+; CHECK-NEXT:    s_add_u32 s0, s0, s2
+; CHECK-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; CHECK-NEXT:    s_addc_u32 s1, s1, s3
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
-; CHECK-NEXT:    v_mov_b32_e32 v5, s3
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v4, s2
-; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -305,10 +292,11 @@ define amdgpu_ps i64 @s_uadd_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_uadd_p1:
 ; CHECK:       ; %bb.0:
 ; CHECK-NEXT:    s_add_u32 s0, s0, 1
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
 ; CHECK-NEXT:    s_addc_u32 s1, s1, 0
-; CHECK-NEXT:    s_cmp_eq_u64 s[0:1], 0
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
 ; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
@@ -350,15 +338,15 @@ define amdgpu_ps i64 @s_uadd_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_p1:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s2, s0, -1
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
-; CHECK-NEXT:    s_addc_u32 s3, s1, -1
+; CHECK-NEXT:    s_sub_u32 s0, s0, 1
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT:    s_subb_u32 s1, s1, 0
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
-; CHECK-NEXT:    v_mov_b32_e32 v5, s3
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v4, s2
-; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
@@ -374,15 +362,15 @@ define amdgpu_ps i64 @s_usub_p1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 define amdgpu_ps i64 @s_usub_n1(i64 inreg %val0, i64 inreg %val1, ptr %ptrval) {
 ; CHECK-LABEL: s_usub_n1:
 ; CHECK:       ; %bb.0:
-; CHECK-NEXT:    s_add_u32 s2, s0, 1
-; CHECK-NEXT:    v_mov_b32_e32 v3, s1
-; CHECK-NEXT:    s_addc_u32 s3, s1, 0
+; CHECK-NEXT:    s_sub_u32 s0, s0, -1
+; CHECK-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; CHECK-NEXT:    s_cmp_lg_u64 s[2:3], 0
+; CHECK-NEXT:    s_subb_u32 s1, s1, -1
 ; CHECK-NEXT:    v_mov_b32_e32 v2, s0
-; CHECK-NEXT:    v_mov_b32_e32 v5, s3
-; CHECK-NEXT:    v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
-; CHECK-NEXT:    v_mov_b32_e32 v4, s2
-; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v3, s1
+; CHECK-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; CHECK-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, s[0:1]
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index 51652a09863e0..d484111596e8b 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -691,7 +691,8 @@ define amdgpu_kernel void @uaddo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
 ; GCN-ISEL-LABEL: name:   suaddo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_ADD_U64_PSEUDO
+; GCN-ISEL: S_UADDO_PSEUDO
+; GCN-ISEL: S_ADD_CO_PSEUDO
 
 define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
 ; CISI-LABEL: suaddo64:
@@ -700,21 +701,23 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s11, 0xf000
 ; CISI-NEXT:    s_mov_b32 s10, -1
 ; CISI-NEXT:    s_waitcnt lgkmcnt(0)
-; CISI-NEXT:    s_add_u32 s6, s4, s6
-; CISI-NEXT:    v_mov_b32_e32 v0, s4
-; CISI-NEXT:    s_addc_u32 s7, s5, s7
-; CISI-NEXT:    v_mov_b32_e32 v1, s5
-; CISI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT:    v_mov_b32_e32 v2, s6
+; CISI-NEXT:    s_add_u32 s4, s4, s6
+; CISI-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; CISI-NEXT:    s_or_b32 s6, s12, s13
+; CISI-NEXT:    s_cmp_lg_u32 s6, 0
+; CISI-NEXT:    s_addc_u32 s5, s5, s7
 ; CISI-NEXT:    s_mov_b32 s8, s0
 ; CISI-NEXT:    s_mov_b32 s9, s1
+; CISI-NEXT:    v_mov_b32_e32 v0, s4
+; CISI-NEXT:    v_mov_b32_e32 v1, s5
+; CISI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s10
 ; CISI-NEXT:    s_mov_b32 s3, s11
-; CISI-NEXT:    v_mov_b32_e32 v3, s7
-; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT:    s_waitcnt expcnt(0)
+; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; CISI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; CISI-NEXT:    s_endpgm
 ;
@@ -722,37 +725,37 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_add_u32 s2, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_add_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_addc_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT:    s_addc_u32 s0, s5, s7
+; VI-NEXT:    v_mov_b32_e32 v4, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s0
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: suaddo64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_add_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
-; GFX9-NEXT:    s_addc_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    s_add_u32 s2, s12, s14
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT:    s_addc_u32 s0, s13, s15
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX1010-LABEL: suaddo64:
@@ -761,10 +764,12 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    s_add_u32 s0, s12, s14
-; GFX1010-NEXT:    s_addc_u32 s1, s13, s15
+; GFX1010-NEXT:    s_cselect_b32 s1, -1, 0
 ; GFX1010-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX1010-NEXT:    s_addc_u32 s1, s13, s15
+; GFX1010-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX1010-NEXT:    v_mov_b32_e32 v1, s1
-; GFX1010-NEXT:    v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -775,11 +780,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT:    s_add_u32 s6, s4, s6
-; GFX1030W32-NEXT:    s_addc_u32 s7, s5, s7
-; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT:    s_add_u32 s4, s4, s6
+; GFX1030W32-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1030W32-NEXT:    s_addc_u32 s5, s5, s7
+; GFX1030W32-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -790,11 +797,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT:    s_add_u32 s6, s4, s6
-; GFX1030W64-NEXT:    s_addc_u32 s7, s5, s7
-; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT:    v_cmp_lt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT:    s_add_u32 s4, s4, s6
+; GFX1030W64-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX1030W64-NEXT:    s_addc_u32 s5, s5, s7
+; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -804,12 +813,13 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_add_u32 s6, s4, s6
-; GFX11-NEXT:    s_addc_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_add_u32 s4, s4, s6
+; GFX11-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX11-NEXT:    s_addc_u32 s5, s5, s7
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -819,12 +829,14 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-LABEL: suaddo64:
 ; GFX1250:       ; %bb.0:
 ; GFX1250-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_add_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT:    v_mov_b64_e32 v[0:1], s[6:7]
+; GFX1250-NEXT:    s_add_co_u32 s4, s4, s6
+; GFX1250-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX1250-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s4
+; GFX1250-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1250-NEXT:    s_add_co_ci_u32 s5, s5, s7
+; GFX1250-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1250-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -841,7 +853,8 @@ define amdgpu_kernel void @suaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vuaddo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_ADD_U64_PSEUDO
+; GCN-ISEL: V_ADD_CO_U32_e64
+; GCN-ISEL: V_ADDC_U32_e64
 
 define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vuaddo64:
@@ -854,9 +867,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_add_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT:    v_cmp_gt_u64_e32 vcc, s[8:9], v[0:1]
 ; CISI-NEXT:    s_mov_b32 s5, s1
+; CISI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -876,7 +888,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_add_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_addc_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -894,7 +905,6 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -909,8 +919,7 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_add_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -923,9 +932,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -938,9 +946,8 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_add_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT:    v_add_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -955,10 +962,9 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -969,16 +975,17 @@ define amdgpu_kernel void @vuaddo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_add_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT:    v_cmp_gt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_add_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT:    v_add_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
@@ -1671,7 +1678,8 @@ define amdgpu_kernel void @usubo32_vcc_user(ptr addrspace(1) %out, ptr addrspace
 ; GCN-ISEL-LABEL: name:   susubo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: S_SUB_U64_PSEUDO
+; GCN-ISEL: S_USUBO_PSEUDO
+; GCN-ISEL: S_SUB_CO_PSEUDO
 
 define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) #0 {
 ; CISI-LABEL: susubo64:
@@ -1680,21 +1688,23 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s11, 0xf000
 ; CISI-NEXT:    s_mov_b32 s10, -1
 ; CISI-NEXT:    s_waitcnt lgkmcnt(0)
-; CISI-NEXT:    s_sub_u32 s6, s4, s6
-; CISI-NEXT:    v_mov_b32_e32 v0, s4
-; CISI-NEXT:    s_subb_u32 s7, s5, s7
-; CISI-NEXT:    v_mov_b32_e32 v1, s5
-; CISI-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; CISI-NEXT:    v_mov_b32_e32 v2, s6
+; CISI-NEXT:    s_sub_u32 s4, s4, s6
+; CISI-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; CISI-NEXT:    s_or_b32 s6, s12, s13
+; CISI-NEXT:    s_cmp_lg_u32 s6, 0
+; CISI-NEXT:    s_subb_u32 s5, s5, s7
 ; CISI-NEXT:    s_mov_b32 s8, s0
 ; CISI-NEXT:    s_mov_b32 s9, s1
+; CISI-NEXT:    v_mov_b32_e32 v0, s4
+; CISI-NEXT:    v_mov_b32_e32 v1, s5
+; CISI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s10
 ; CISI-NEXT:    s_mov_b32 s3, s11
-; CISI-NEXT:    v_mov_b32_e32 v3, s7
-; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CISI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; CISI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CISI-NEXT:    s_waitcnt expcnt(0)
+; CISI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; CISI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; CISI-NEXT:    s_endpgm
 ;
@@ -1702,37 +1712,37 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_sub_u32 s2, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_sub_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_subb_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT:    s_subb_u32 s0, s5, s7
+; VI-NEXT:    v_mov_b32_e32 v4, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s0
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: susubo64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_sub_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
-; GFX9-NEXT:    s_subb_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    s_sub_u32 s2, s12, s14
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT:    s_subb_u32 s0, s13, s15
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX1010-LABEL: susubo64:
@@ -1741,10 +1751,12 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    s_sub_u32 s0, s12, s14
-; GFX1010-NEXT:    s_subb_u32 s1, s13, s15
+; GFX1010-NEXT:    s_cselect_b32 s1, -1, 0
 ; GFX1010-NEXT:    v_mov_b32_e32 v0, s0
+; GFX1010-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX1010-NEXT:    s_subb_u32 s1, s13, s15
+; GFX1010-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX1010-NEXT:    v_mov_b32_e32 v1, s1
-; GFX1010-NEXT:    v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -1755,11 +1767,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT:    s_sub_u32 s6, s4, s6
-; GFX1030W32-NEXT:    s_subb_u32 s7, s5, s7
-; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W32-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W32-NEXT:    s_sub_u32 s4, s4, s6
+; GFX1030W32-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX1030W32-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W32-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1030W32-NEXT:    s_subb_u32 s5, s5, s7
+; GFX1030W32-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1030W32-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -1770,11 +1784,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX1030W64-NEXT:    s_sub_u32 s6, s4, s6
-; GFX1030W64-NEXT:    s_subb_u32 s7, s5, s7
-; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s6
-; GFX1030W64-NEXT:    v_cmp_gt_u64_e64 s[4:5], s[6:7], s[4:5]
-; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s7
+; GFX1030W64-NEXT:    s_sub_u32 s4, s4, s6
+; GFX1030W64-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; GFX1030W64-NEXT:    v_mov_b32_e32 v0, s4
+; GFX1030W64-NEXT:    s_cmp_lg_u64 s[8:9], 0
+; GFX1030W64-NEXT:    s_subb_u32 s5, s5, s7
+; GFX1030W64-NEXT:    v_mov_b32_e32 v1, s5
+; GFX1030W64-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
@@ -1784,12 +1800,13 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_sub_u32 s6, s4, s6
-; GFX11-NEXT:    s_subb_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_sub_u32 s4, s4, s6
+; GFX11-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX11-NEXT:    s_subb_u32 s5, s5, s7
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1799,12 +1816,14 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-LABEL: susubo64:
 ; GFX1250:       ; %bb.0:
 ; GFX1250-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_sub_nc_u64 s[6:7], s[4:5], s[6:7]
-; GFX1250-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX1250-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX1250-NEXT:    v_mov_b64_e32 v[0:1], s[6:7]
+; GFX1250-NEXT:    s_sub_co_u32 s4, s4, s6
+; GFX1250-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX1250-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v0, s4
+; GFX1250-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX1250-NEXT:    s_sub_co_ci_u32 s5, s5, s7
+; GFX1250-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX1250-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -1821,7 +1840,8 @@ define amdgpu_kernel void @susubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GCN-ISEL-LABEL: name:   vusubo64
 ; GCN-ISEL-LABEL: body:
 ; GCN-ISEL-LABEL: bb.0
-; GCN-ISEL: V_SUB_U64_PSEUDO
+; GCN-ISEL: V_SUB_CO_U32_e64
+; GCN-ISEL: V_SUBB_U32_e64
 
 define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a) #0 {
 ; CISI-LABEL: vusubo64:
@@ -1834,9 +1854,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; CISI-NEXT:    s_mov_b32 s4, s0
 ; CISI-NEXT:    v_mov_b32_e32 v1, s9
 ; CISI-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
-; CISI-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CISI-NEXT:    v_cmp_lt_u64_e32 vcc, s[8:9], v[0:1]
 ; CISI-NEXT:    s_mov_b32 s5, s1
+; CISI-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
 ; CISI-NEXT:    s_mov_b32 s0, s2
 ; CISI-NEXT:    s_mov_b32 s1, s3
 ; CISI-NEXT:    s_mov_b32 s2, s6
@@ -1856,7 +1875,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; VI-NEXT:    v_mov_b32_e32 v6, s5
 ; VI-NEXT:    v_sub_u32_e32 v5, vcc, s4, v0
 ; VI-NEXT:    v_subbrev_u32_e32 v6, vcc, 0, v6, vcc
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[5:6]
 ; VI-NEXT:    v_mov_b32_e32 v2, s1
 ; VI-NEXT:    v_mov_b32_e32 v3, s2
 ; VI-NEXT:    v_mov_b32_e32 v4, s3
@@ -1874,7 +1892,6 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_subbrev_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v2, v0, s[2:3]
@@ -1889,8 +1906,7 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1010-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1010-NEXT:    v_sub_co_u32 v0, s4, s6, v0
 ; GFX1010-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
-; GFX1010-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1010-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1010-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1010-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1010-NEXT:    s_endpgm
@@ -1903,9 +1919,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W32-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W32-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W32-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
-; GFX1030W32-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX1030W32-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1030W32-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1030W32-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W32-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W32-NEXT:    s_endpgm
@@ -1918,9 +1933,8 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1030W64-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1030W64-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX1030W64-NEXT:    v_sub_co_u32 v0, s[4:5], s6, v0
-; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s[4:5]
-; GFX1030W64-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX1030W64-NEXT:    v_sub_co_ci_u32_e64 v1, s[4:5], s7, 0, s[4:5]
+; GFX1030W64-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; GFX1030W64-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX1030W64-NEXT:    global_store_byte v2, v3, s[2:3]
 ; GFX1030W64-NEXT:    s_endpgm
@@ -1935,10 +1949,9 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_sub_co_u32 v0, s4, s6, v0
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, null, s7, 0, s4
+; GFX11-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    global_store_b8 v2, v3, s[2:3]
@@ -1949,16 +1962,17 @@ define amdgpu_kernel void @vusubo64(ptr addrspace(1) %out, ptr addrspace(1) %car
 ; GFX1250-NEXT:    s_clause 0x1
 ; GFX1250-NEXT:    s_load_b64 s[6:7], s[4:5], 0x34
 ; GFX1250-NEXT:    s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT:    v_mov_b32_e32 v1, 0
 ; GFX1250-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; GFX1250-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX1250-NEXT:    s_wait_kmcnt 0x0
-; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX1250-NEXT:    v_sub_nc_u64_e32 v[2:3], s[6:7], v[0:1]
-; GFX1250-NEXT:    v_cmp_lt_u64_e32 vcc_lo, s[6:7], v[2:3]
-; GFX1250-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1250-NEXT:    v_sub_co_u32 v0, s4, s6, v0
+; GFX1250-NEXT:    v_sub_co_ci_u32_e64 v1, s4, s7, 0, s4
+; GFX1250-NEXT:    s_delay_alu instid0(VALU_DEP_1)
+; GFX1250-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX1250-NEXT:    s_clause 0x1
-; GFX1250-NEXT:    global_store_b64 v1, v[2:3], s[0:1]
-; GFX1250-NEXT:    global_store_b8 v1, v0, s[2:3]
+; GFX1250-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
+; GFX1250-NEXT:    global_store_b8 v2, v3, s[2:3]
 ; GFX1250-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
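
All of the uaddo/usubo diffs above share one shape: the wide compare that used to recover the carry (v_cmp_lt_u64 after an add, v_cmp_gt_u64 after a sub) is removed. On the scalar path the carry-out is instead captured with s_cselect_b64 ..., -1, 0, re-primed via s_cmp_lg_u32/u64 ahead of the high-half s_addc_u32 or s_subb_u32, and the final cselect mask feeds v_cndmask directly; on the VALU path the carry-out SGPR produced by v_add_co_u32/v_sub_co_u32 is threaded straight into the _co_ci instruction and v_cndmask. A minimal IR sketch of the input that exercises this lowering (the function name here is made up for illustration, not taken from the patch):

declare { i64, i1 } @llvm.uadd.with.overflow.i64(i64, i64)

define amdgpu_kernel void @uaddo64_sketch(ptr addrspace(1) %out, ptr addrspace(1) %carryout, i64 %a, i64 %b) {
  ; Lowers to the s_add_u32/s_addc_u32 carry chain plus the
  ; s_cselect-based carry materialization checked in the bodies above.
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %sum = extractvalue { i64, i1 } %pair, 0
  %carry = extractvalue { i64, i1 } %pair, 1
  store i64 %sum, ptr addrspace(1) %out
  store i1 %carry, ptr addrspace(1) %carryout
  ret void
}
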
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index ddec6af0af69e..3c40fd23a1c44 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -206,8 +206,11 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s18, s16, 1
-; GCN-IR-NEXT:    s_addc_u32 s19, s17, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s10, s10, s11
+; GCN-IR-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-IR-NEXT:    s_addc_u32 s10, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s16, 63, s16
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s16
@@ -217,9 +220,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_add_u32 s18, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s3, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
-; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s8, s20
+; GCN-IR-NEXT:    s_addc_u32 s15, s9, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -227,19 +230,22 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_lshr_b32 s8, s11, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
 ; GCN-IR-NEXT:    s_sub_u32 s8, s18, s16
 ; GCN-IR-NEXT:    s_subb_u32 s8, s19, s17
-; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s16, s16, s14
-; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
-; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
+; GCN-IR-NEXT:    s_ashr_i32 s12, s8, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s8, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[12:13], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s16, s16, s20
+; GCN-IR-NEXT:    s_subb_u32 s17, s17, s21
+; GCN-IR-NEXT:    s_add_u32 s14, s14, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s20, s20, s21
+; GCN-IR-NEXT:    s_cmp_lg_u32 s20, 0
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[8:9]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -389,25 +395,25 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_sdiv:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v12, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v13
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v13
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v11, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v11
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v11
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v6
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v7
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
@@ -416,70 +422,69 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v14, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v13
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v11
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v6, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v4, v10
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v4, v11
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[6:7], v14
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v4, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v4, v9
+; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v16, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v17, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v15, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v11, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v13, v12
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v4, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
@@ -1295,34 +1300,37 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[2:3], s[4:5]
 ; GCN-IR-NEXT:    s_sub_u32 s2, s2, s4
 ; GCN-IR-NEXT:    s_subb_u32 s3, s3, s4
-; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[2:3]
-; GCN-IR-NEXT:    s_add_u32 s10, s14, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s16, s[2:3]
+; GCN-IR-NEXT:    s_add_u32 s10, s16, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[12:13]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[12:13], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[12:13], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s8, s8, s9
+; GCN-IR-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT:    s_addc_u32 s8, s11, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s10
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s12
-; GCN-IR-NEXT:    s_add_u32 s16, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s17, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s10, 58, s14
-; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s16, 58, s16
+; GCN-IR-NEXT:    s_subb_u32 s17, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1330,19 +1338,22 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s6, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s6, s16, s12
-; GCN-IR-NEXT:    s_subb_u32 s6, s17, s13
-; GCN-IR-NEXT:    s_ashr_i32 s14, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s6, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s6, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s6, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s10, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_and_b32 s6, s10, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[10:11], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT:    s_add_u32 s16, s16, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s18, s18, s19
+; GCN-IR-NEXT:    s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow6
@@ -1474,17 +1485,17 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1492,69 +1503,68 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 58, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 24, %x
   ret i64 %result
@@ -1667,17 +1677,17 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1686,70 +1696,69 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v14, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 32768, %x
   ret i64 %result
@@ -1769,20 +1778,20 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v8
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v8, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e64 v0, s[4:5], 32, v0
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v5
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v0, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v0, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v6
 ; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v8
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1792,61 +1801,60 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 63, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[4:5], v[4:5], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
-; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s10, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v6, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v2
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v4, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v7, v1
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v3
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v2
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v3, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v2, v0
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v10
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v8
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v9
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, 32768
   ret i64 %result
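
In the GCN-IR long-division loops of sdiv64.ll, the codegen change is the zero test on the 64-bit loop counter: the old v_cmp_eq_u64 of the incremented pair disappears, and the s_cselect_b64 written after s_addc_u32 doubles as the equals-zero predicate, since an increment by one wraps to zero exactly when the high-word add carries out; the remaining churn in the diff is register renumbering that falls out of the freed VGPRs and SGPRs. A standalone sketch of that equivalence in IR (hypothetical function name, not from the patch):

define i1 @inc_wraps_to_zero(i64 %i) {
  ; %i + 1 == 0 iff the 64-bit add of 1 carries out of the high word,
  ; which is what the s_addc_u32 + s_cselect_b64 pair reads off SCC.
  %next = add i64 %i, 1
  %done = icmp eq i64 %next, 0
  ret i1 %done
}
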
diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 2d95875cad882..fd067024fac1f 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -170,35 +170,38 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT:    s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT:    s_flbit_i32_b64 s16, s[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
 ; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s8, s8, s9
+; GCN-IR-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT:    s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT:    s_add_u32 s16, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT:    s_add_u32 s14, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s4, s18
-; GCN-IR-NEXT:    s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s4, s16
+; GCN-IR-NEXT:    s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -206,19 +209,22 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT:    s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s4, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT:    s_add_u32 s16, s16, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s18, s18, s19
+; GCN-IR-NEXT:    s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -373,12 +379,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_srem:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v14
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v14, vcc
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
 ; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v4
 ; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
@@ -386,12 +392,12 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -400,7 +406,7 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v14
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
@@ -408,54 +414,53 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, v6, v11
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v17, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v16, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v17, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, 1, v16
+; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, 0, v17, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -469,10 +474,10 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v15
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v15, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, %y
   ret i64 %result
@@ -1148,35 +1153,38 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[8:9], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[2:3], s[10:11]
-; GCN-IR-NEXT:    s_flbit_i32_b64 s20, s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s20
+; GCN-IR-NEXT:    s_flbit_i32_b64 s18, s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s18
 ; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[16:17]
 ; GCN-IR-NEXT:    s_and_b64 s[10:11], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s7
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s6
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[20:21]
 ; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s16, s14, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s15, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s10, s10, s11
+; GCN-IR-NEXT:    s_cmp_lg_u32 s10, 0
+; GCN-IR-NEXT:    s_addc_u32 s10, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[6:7], s14
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[6:7], s16
-; GCN-IR-NEXT:    s_add_u32 s18, s8, -1
-; GCN-IR-NEXT:    s_addc_u32 s19, s9, -1
+; GCN-IR-NEXT:    s_add_u32 s16, s8, -1
+; GCN-IR-NEXT:    s_addc_u32 s17, s9, -1
 ; GCN-IR-NEXT:    s_not_b64 s[2:3], s[12:13]
-; GCN-IR-NEXT:    s_add_u32 s12, s2, s20
-; GCN-IR-NEXT:    s_addc_u32 s13, s3, 0
-; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s18, s2, s18
+; GCN-IR-NEXT:    s_addc_u32 s19, s3, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s3, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1184,19 +1192,22 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_lshr_b32 s2, s11, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[2:3]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[16:17], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s2, s18, s14
-; GCN-IR-NEXT:    s_subb_u32 s2, s19, s15
-; GCN-IR-NEXT:    s_ashr_i32 s16, s2, 31
-; GCN-IR-NEXT:    s_mov_b32 s17, s16
-; GCN-IR-NEXT:    s_and_b32 s2, s16, 1
-; GCN-IR-NEXT:    s_and_b64 s[16:17], s[16:17], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s14, s14, s16
-; GCN-IR-NEXT:    s_subb_u32 s15, s15, s17
-; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
-; GCN-IR-NEXT:    s_mov_b64 s[16:17], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[12:13], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s2, s16, s14
+; GCN-IR-NEXT:    s_subb_u32 s2, s17, s15
+; GCN-IR-NEXT:    s_ashr_i32 s12, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s13, s12
+; GCN-IR-NEXT:    s_and_b32 s2, s12, 1
+; GCN-IR-NEXT:    s_and_b64 s[20:21], s[12:13], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s14, s14, s20
+; GCN-IR-NEXT:    s_subb_u32 s15, s15, s21
+; GCN-IR-NEXT:    s_add_u32 s18, s18, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s20, s20, s21
+; GCN-IR-NEXT:    s_cmp_lg_u32 s20, 0
+; GCN-IR-NEXT:    s_addc_u32 s19, s19, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[20:21], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[2:3]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_3
 ; GCN-IR-NEXT:  .LBB8_4: ; %Flow7
@@ -1463,34 +1474,37 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[2:3], s[8:9]
 ; GCN-IR-NEXT:    s_sub_u32 s4, s2, s8
 ; GCN-IR-NEXT:    s_subb_u32 s5, s3, s8
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[4:5]
-; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[4:5]
+; GCN-IR-NEXT:    s_add_u32 s2, s14, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[2:3], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[2:3], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 63
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[8:9], s[10:11]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s8, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s9, s10, s11
+; GCN-IR-NEXT:    s_cmp_lg_u32 s9, 0
+; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[10:11], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s2, 63, s2
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], 24, s2
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s8
-; GCN-IR-NEXT:    s_add_u32 s14, s4, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s5, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_add_u32 s12, s4, -1
+; GCN-IR-NEXT:    s_addc_u32 s13, s5, -1
+; GCN-IR-NEXT:    s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -1498,19 +1512,22 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s6, s3, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_or_b64 s[2:3], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s6, s14, s10
-; GCN-IR-NEXT:    s_subb_u32 s6, s15, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s6, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s6, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[6:7]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[8:9], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s6, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s6, s13, s11
+; GCN-IR-NEXT:    s_ashr_i32 s8, s6, 31
+; GCN-IR-NEXT:    s_mov_b32 s9, s8
+; GCN-IR-NEXT:    s_and_b32 s6, s8, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT:    s_add_u32 s14, s14, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s16, s16, s17
+; GCN-IR-NEXT:    s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow6
@@ -1649,9 +1666,9 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1665,53 +1682,52 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 58, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1840,9 +1856,9 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1857,54 +1873,53 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1939,20 +1954,20 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v10
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v10, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
@@ -1963,51 +1978,50 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -2016,10 +2030,10 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 15
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, 32768
   ret i64 %result
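
The tail of the pow2 tests above (v_lshl_b64 ... 15 followed by
v_sub_i32/v_subb_u32) is the usual remainder-from-quotient step: srem by
32768 folds to x - (x/32768)*32768, with the multiply strength-reduced to a
15-bit shift. In IR terms (an illustrative sketch, not taken from the
patch; the function name is invented):

define i64 @srem_pow2_sketch(i64 %x) {
  %q = sdiv i64 %x, 32768
  %qshl = shl i64 %q, 15        ; %q * 32768, the 15-bit shift in the checks
  %r = sub i64 %x, %qshl        ; cannot overflow: |%q * 32768| <= |%x|
  ret i64 %r
}
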
diff --git a/llvm/test/CodeGen/AMDGPU/uaddo.ll b/llvm/test/CodeGen/AMDGPU/uaddo.ll
index e1574dcd45462..bb5918b256d28 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddo.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddo.ll
@@ -14,15 +14,16 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    s_add_u32 s0, s2, s8
-; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    s_add_u32 s2, s2, s8
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    s_addc_u32 s1, s3, s9
+; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT:    s_or_b32 s0, s0, s1
+; SI-NEXT:    s_cmp_lg_u32 s0, 0
+; SI-NEXT:    s_addc_u32 s3, s3, s9
+; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -33,15 +34,15 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_add_u32 s0, s2, s4
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_add_u32 s2, s2, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT:    s_addc_u32 s3, s3, s5
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    s_addc_u32 s1, s3, s5
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s2, v2
 ; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
@@ -52,14 +53,14 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    s_add_u32 s4, s2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    s_addc_u32 s5, s3, s7
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT:    s_add_u32 s6, s2, s6
+; GFX9-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_addc_u32 s4, s3, s7
+; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    s_endpgm
@@ -71,12 +72,14 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX10-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_add_u32 s4, s2, s6
-; GFX10-NEXT:    s_addc_u32 s5, s3, s7
-; GFX10-NEXT:    v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT:    s_add_u32 s2, s2, s6
+; GFX10-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX10-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX10-NEXT:    s_addc_u32 s3, s3, s7
+; GFX10-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX10-NEXT:    s_endpgm
 ;
@@ -87,14 +90,16 @@ define amdgpu_kernel void @s_uaddo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX11-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_add_u32 s4, s2, s4
-; GFX11-NEXT:    s_addc_u32 s5, s3, s5
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_lt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-NEXT:    s_add_u32 s2, s2, s4
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX11-NEXT:    s_addc_u32 s3, s3, s5
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s3, 0, s2
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    s_endpgm
   %uadd = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
@@ -436,21 +441,23 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_add_u32 s6, s4, s6
-; SI-NEXT:    v_mov_b32_e32 v0, s4
-; SI-NEXT:    s_addc_u32 s7, s5, s7
-; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    s_add_u32 s4, s4, s6
+; SI-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; SI-NEXT:    s_or_b32 s6, s12, s13
+; SI-NEXT:    s_cmp_lg_u32 s6, 0
+; SI-NEXT:    s_addc_u32 s5, s5, s7
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; SI-NEXT:    s_mov_b32 s0, s2
 ; SI-NEXT:    s_mov_b32 s1, s3
 ; SI-NEXT:    s_mov_b32 s2, s10
 ; SI-NEXT:    s_mov_b32 s3, s11
-; SI-NEXT:    v_mov_b32_e32 v3, s7
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -458,37 +465,37 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_add_u32 s2, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_add_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_addc_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT:    s_addc_u32 s0, s5, s7
+; VI-NEXT:    v_mov_b32_e32 v4, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s0
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: s_uaddo_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_add_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
-; GFX9-NEXT:    s_addc_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    s_add_u32 s2, s12, s14
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT:    s_addc_u32 s0, s13, s15
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: s_uaddo_i64:
@@ -497,10 +504,12 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_add_u32 s0, s12, s14
-; GFX10-NEXT:    s_addc_u32 s1, s13, s15
+; GFX10-NEXT:    s_cselect_b32 s1, -1, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX10-NEXT:    s_addc_u32 s1, s13, s15
+; GFX10-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
-; GFX10-NEXT:    v_cmp_lt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX10-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -510,12 +519,13 @@ define amdgpu_kernel void @s_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_add_u32 s6, s4, s6
-; GFX11-NEXT:    s_addc_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_lt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_add_u32 s4, s4, s6
+; GFX11-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX11-NEXT:    s_addc_u32 s5, s5, s7
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -551,10 +561,10 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s4, s2
 ; SI-NEXT:    s_mov_b32 s5, s3
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_add_i32_e32 v2, vcc, v0, v2
-; SI-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; SI-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -574,10 +584,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI-NEXT:    v_mov_b32_e32 v6, s2
 ; VI-NEXT:    v_mov_b32_e32 v7, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_add_u32_e32 v2, vcc, v0, v2
-; VI-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; VI-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; VI-NEXT:    flat_store_byte v[6:7], v0
 ; VI-NEXT:    s_endpgm
@@ -590,10 +599,9 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX9-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
 ; GFX9-NEXT:    s_endpgm
@@ -607,12 +615,11 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT:    global_store_byte v4, v2, s[10:11]
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_uaddo_i64:
@@ -624,14 +631,12 @@ define amdgpu_kernel void @v_uaddo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11-NEXT:    global_load_b64 v[0:1], v4, s[4:5]
 ; GFX11-NEXT:    global_load_b64 v[2:3], v4, s[6:7]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT:    global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT:    global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT:    global_store_b8 v4, v2, s[2:3]
 ; GFX11-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
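
The v_cmp_lt_u64 instructions deleted throughout uaddo.ll recomputed the
carry as (a + b) <u a; with the carry-out of the addition itself now kept
live (in SCC on the scalar path, vcc on the vector path), that compare is
redundant. The equivalence in IR (illustrative only, not part of this
patch; names are invented):

define i1 @carry_equals_cmp(i64 %a, i64 %b) {
  %sum = add i64 %a, %b
  %bycmp = icmp ult i64 %sum, %a                 ; old lowering: setcc on the sum
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %bycarry = extractvalue { i64, i1 } %pair, 1   ; new lowering: carry-out
  %same = icmp eq i1 %bycmp, %bycarry            ; always true
  ret i1 %same
}
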
diff --git a/llvm/test/CodeGen/AMDGPU/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
index 923017400adb1..7f89581d00fde 100644
--- a/llvm/test/CodeGen/AMDGPU/uaddsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/uaddsat.ll
@@ -693,52 +693,47 @@ define i64 @v_uaddsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_uaddsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_add_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX6-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_uaddsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_uaddsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_add_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX10-LABEL: v_uaddsat_i64:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: v_uaddsat_i64:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, -1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, -1, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
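
The v_uaddsat_i64 change is the same idea applied to saturation: the
clamp-to-all-ones select now keys directly off vcc produced by the addc
rather than a separate v_cmp_lt_u64 on the sum. A sketch of the pattern,
assuming the generic uadd.sat expansion (illustrative only, not part of
this patch; the function name is invented):

define i64 @uaddsat_sketch(i64 %a, i64 %b) {
  %pair = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
  %sum = extractvalue { i64, i1 } %pair, 0
  %ov = extractvalue { i64, i1 } %pair, 1
  ; Saturate to all-ones on carry-out, matching v_cndmask ..., -1, vcc.
  %sat = select i1 %ov, i64 -1, i64 %sum
  ret i64 %sat
}
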
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index dc11e81476a7e..09e79102d7564 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -146,8 +146,11 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s8, s8, s9
+; GCN-IR-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT:    s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
@@ -157,9 +160,9 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_add_u32 s14, s6, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s2, s2, s16
-; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_add_u32 s10, s2, s16
+; GCN-IR-NEXT:    s_addc_u32 s11, s3, 0
+; GCN-IR-NEXT:    s_mov_b64 s[2:3], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -167,19 +170,22 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
-; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s11, s10
-; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s10
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
-; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[2:3], 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[2:3], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s2, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s2, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s2, s2, 31
+; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_and_b32 s4, s2, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[2:3], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s16
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s17
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s16, s16, s17
+; GCN-IR-NEXT:    s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[2:3], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -313,19 +319,19 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v14, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v15, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[6:7], v14, v15
+; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v8, v9
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[8:9]
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[6:7]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[8:9]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v0, 0, s[4:5]
@@ -333,55 +339,54 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v8
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v10
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v0, v14
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v15
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v10
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v8
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, v6, v9
+; GCN-IR-NEXT:    v_addc_u32_e64 v13, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v10, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v11, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v6, v0
@@ -927,34 +932,37 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT:    s_add_u32 s8, s14, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s6, s6, s7
+; GCN-IR-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_add_u32 s12, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -962,19 +970,22 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s9, s8
+; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT:    s_add_u32 s14, s14, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s16, s16, s17
+; GCN-IR-NEXT:    s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_3
 ; GCN-IR-NEXT:  .LBB8_4: ; %Flow6
@@ -1098,12 +1109,12 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffd0, v10
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[4:5]
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v3, 0, s[4:5]
@@ -1113,55 +1124,54 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v8
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1188,13 +1198,13 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 48, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1202,52 +1212,51 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 0x8000, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB10_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB10_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB10_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
@@ -1294,52 +1303,58 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s10
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_u32 s11, s8, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s6, s6, s7
+; GCN-IR-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc4
-; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
+; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], s11
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 0xffffffc4
+; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, 23, s8
-; GCN-IR-NEXT:    s_subb_u32 s4, 0, s9
-; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
-; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
-; GCN-IR-NEXT:    s_and_b32 s10, s10, 24
-; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
-; GCN-IR-NEXT:    s_subb_u32 s9, s9, 0
-; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[2:3], s[2:3], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s2
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s3
+; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
+; GCN-IR-NEXT:    s_and_b32 s8, s8, 24
+; GCN-IR-NEXT:    s_sub_u32 s2, s2, s8
+; GCN-IR-NEXT:    s_subb_u32 s3, s3, 0
+; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s12, s12, s13
+; GCN-IR-NEXT:    s_cmp_lg_u32 s12, 0
+; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_3
 ; GCN-IR-NEXT:  .LBB11_4: ; %Flow6
@@ -1388,13 +1403,13 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 59, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v1, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v0, 0, s[4:5]
@@ -1402,51 +1417,50 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v2, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffc4, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[0:1], v[0:1], v7
+; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v0, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 24, v8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v6, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v6
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v7, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_or_b32_e32 v3, v4, v0
diff --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index dc25caadb99a9..a0dc07745ccb2 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -170,35 +170,38 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[6:7]
-; GCN-IR-NEXT:    s_flbit_i32_b64 s18, s[2:3]
+; GCN-IR-NEXT:    s_flbit_i32_b64 s16, s[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
 ; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
 ; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s8, s8, s9
+; GCN-IR-NEXT:    s_cmp_lg_u32 s8, 0
+; GCN-IR-NEXT:    s_addc_u32 s8, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[8:9], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
-; GCN-IR-NEXT:    s_add_u32 s16, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s17, s7, -1
+; GCN-IR-NEXT:    s_add_u32 s14, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s4, s18
-; GCN-IR-NEXT:    s_addc_u32 s11, s5, 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s4, s16
+; GCN-IR-NEXT:    s_addc_u32 s17, s5, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -206,19 +209,22 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s9, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s4, s16, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s17, s13
-; GCN-IR-NEXT:    s_ashr_i32 s14, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s15, s14
-; GCN-IR-NEXT:    s_and_b32 s4, s14, 1
-; GCN-IR-NEXT:    s_and_b64 s[14:15], s[14:15], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s12, s12, s14
-; GCN-IR-NEXT:    s_subb_u32 s13, s13, s15
-; GCN-IR-NEXT:    s_add_u32 s10, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[10:11], 0
-; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b64 s[18:19], s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s12, s12, s18
+; GCN-IR-NEXT:    s_subb_u32 s13, s13, s19
+; GCN-IR-NEXT:    s_add_u32 s16, s16, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s18, s18, s19
+; GCN-IR-NEXT:    s_cmp_lg_u32 s18, 0
+; GCN-IR-NEXT:    s_addc_u32 s17, s17, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[18:19], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
@@ -362,12 +368,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -383,54 +389,53 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v6, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v6, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], -1, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v6, v10
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, v6, v11
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
+; GCN-IR-NEXT:    v_addc_u32_e64 v15, s[8:9], -1, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v9, vcc, v9, v11, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v14
+; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v15, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
@@ -948,34 +953,37 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_flbit_i32_b64 s14, s[2:3]
+; GCN-IR-NEXT:    s_add_u32 s8, s14, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s6, s6, s7
+; GCN-IR-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
-; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
-; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_add_u32 s12, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s13, s3, -1
+; GCN-IR-NEXT:    s_sub_u32 s14, 58, s14
+; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB6_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -983,19 +991,22 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, s14, s10
-; GCN-IR-NEXT:    s_subb_u32 s4, s15, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT:    s_mov_b32 s13, s12
-; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
-; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, s13
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[8:9], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, s12, s10
+; GCN-IR-NEXT:    s_subb_u32 s4, s13, s11
+; GCN-IR-NEXT:    s_ashr_i32 s8, s4, 31
+; GCN-IR-NEXT:    s_mov_b32 s9, s8
+; GCN-IR-NEXT:    s_and_b32 s4, s8, 1
+; GCN-IR-NEXT:    s_and_b64 s[16:17], s[8:9], s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s10, s10, s16
+; GCN-IR-NEXT:    s_subb_u32 s11, s11, s17
+; GCN-IR-NEXT:    s_add_u32 s14, s14, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s16, s16, s17
+; GCN-IR-NEXT:    s_cmp_lg_u32 s16, 0
+; GCN-IR-NEXT:    s_addc_u32 s15, s15, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[16:17], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_3
 ; GCN-IR-NEXT:  .LBB6_4: ; %Flow6
@@ -1064,52 +1075,58 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT:    s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s10
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_add_u32 s11, s8, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s6, s6, s7
+; GCN-IR-NEXT:    s_cmp_lg_u32 s6, 0
+; GCN-IR-NEXT:    s_addc_u32 s6, s9, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[6:7], -1, 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
-; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc4
-; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
+; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s11
+; GCN-IR-NEXT:    s_add_u32 s12, s10, 0xffffffc4
+; GCN-IR-NEXT:    s_addc_u32 s13, 0, -1
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
 ; GCN-IR-NEXT:  .LBB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_lshr_b32 s4, s7, 31
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[12:13], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, 23, s10
-; GCN-IR-NEXT:    s_subb_u32 s4, 0, s11
-; GCN-IR-NEXT:    s_ashr_i32 s12, s4, 31
-; GCN-IR-NEXT:    s_and_b32 s4, s12, 1
-; GCN-IR-NEXT:    s_and_b32 s12, s12, 24
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s12
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, 0
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 0
-; GCN-IR-NEXT:    s_mov_b64 s[12:13], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
+; GCN-IR-NEXT:    s_sub_u32 s4, 23, s8
+; GCN-IR-NEXT:    s_subb_u32 s4, 0, s9
+; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
+; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
+; GCN-IR-NEXT:    s_and_b32 s10, s10, 24
+; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
+; GCN-IR-NEXT:    s_subb_u32 s9, s9, 0
+; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
+; GCN-IR-NEXT:    s_cselect_b64 s[14:15], -1, 0
+; GCN-IR-NEXT:    s_or_b32 s14, s14, s15
+; GCN-IR-NEXT:    s_cmp_lg_u32 s14, 0
+; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
+; GCN-IR-NEXT:    s_cselect_b64 s[14:15], -1, 0
+; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_3
 ; GCN-IR-NEXT:  .LBB7_4: ; %Flow6
@@ -1241,8 +1258,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v8
 ; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
@@ -1257,54 +1274,53 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0x8000
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[8:9], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[10:11], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, -1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, -1, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e32 v12, vcc, 47, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v6
+; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[8:9], 0, 0, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v12, v8
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v13, v9, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v11, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v7, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v12
+; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB8_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
@@ -1337,8 +1353,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1352,51 +1368,50 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], vcc, -1
+; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], s[4:5]
+; GCN-IR-NEXT:    s_xor_b64 s[4:5], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
-; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_addc_u32_e64 v11, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
+; GCN-IR-NEXT:    s_movk_i32 s10, 0x7fff
 ; GCN-IR-NEXT:  .LBB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
-; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
-; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s10, v6
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v6, v8
+; GCN-IR-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
+; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB9_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
-; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
-; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
+; GCN-IR-NEXT:  .LBB9_5: ; %Flow4
+; GCN-IR-NEXT:    s_or_b64 exec, exec, s[4:5]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
 ; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
diff --git a/llvm/test/CodeGen/AMDGPU/usubo.ll b/llvm/test/CodeGen/AMDGPU/usubo.ll
index 0289dab4588a2..d67a7b151948e 100644
--- a/llvm/test/CodeGen/AMDGPU/usubo.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubo.ll
@@ -14,15 +14,16 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    s_mov_b32 s4, s0
-; SI-NEXT:    s_sub_u32 s0, s2, s8
-; SI-NEXT:    v_mov_b32_e32 v0, s2
+; SI-NEXT:    s_sub_u32 s2, s2, s8
 ; SI-NEXT:    s_mov_b32 s5, s1
-; SI-NEXT:    s_subb_u32 s1, s3, s9
+; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT:    s_or_b32 s0, s0, s1
+; SI-NEXT:    s_cmp_lg_u32 s0, 0
+; SI-NEXT:    s_subb_u32 s3, s3, s9
+; SI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; SI-NEXT:    v_mov_b32_e32 v1, s3
-; SI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v1, s1
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT:    v_add_i32_e32 v0, vcc, s2, v0
 ; SI-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -33,15 +34,15 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; VI-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x34
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_sub_u32 s0, s2, s4
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_sub_u32 s2, s2, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT:    s_subb_u32 s3, s3, s5
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[0:1]
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    s_subb_u32 s1, s3, s5
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[2:3]
-; VI-NEXT:    v_mov_b32_e32 v3, s1
-; VI-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; VI-NEXT:    v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT:    v_add_u32_e32 v2, vcc, s2, v2
 ; VI-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
 ; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; VI-NEXT:    s_endpgm
@@ -52,14 +53,14 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX9-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-NEXT:    s_sub_u32 s4, s2, s6
-; GFX9-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-NEXT:    s_subb_u32 s5, s3, s7
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[4:5], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v1, s5
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s4, v0
+; GFX9-NEXT:    s_sub_u32 s6, s2, s6
+; GFX9-NEXT:    s_cselect_b64 s[4:5], -1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[4:5], 0
+; GFX9-NEXT:    s_subb_u32 s4, s3, s7
+; GFX9-NEXT:    s_cselect_b64 s[2:3], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[2:3]
+; GFX9-NEXT:    v_mov_b32_e32 v1, s4
+; GFX9-NEXT:    v_add_co_u32_e32 v0, vcc, s6, v0
 ; GFX9-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX9-NEXT:    s_endpgm
@@ -71,12 +72,14 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX10-NEXT:    s_load_dwordx2 s[6:7], s[4:5], 0x34
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_sub_u32 s4, s2, s6
-; GFX10-NEXT:    s_subb_u32 s5, s3, s7
-; GFX10-NEXT:    v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
-; GFX10-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s5, 0, s2
+; GFX10-NEXT:    s_sub_u32 s2, s2, s6
+; GFX10-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX10-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX10-NEXT:    s_subb_u32 s3, s3, s7
+; GFX10-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s2, s3, 0, s2
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX10-NEXT:    s_endpgm
 ;
@@ -87,14 +90,16 @@ define amdgpu_kernel void @s_usubo_i64_zext(ptr addrspace(1) %out, i64 %a, i64 %
 ; GFX11-NEXT:    s_load_b64 s[4:5], s[4:5], 0x34
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_sub_u32 s4, s2, s4
-; GFX11-NEXT:    s_subb_u32 s5, s3, s5
-; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cmp_gt_u64_e64 s2, s[4:5], s[2:3]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s2
+; GFX11-NEXT:    s_sub_u32 s2, s2, s4
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT:    s_cmp_lg_u32 s4, 0
+; GFX11-NEXT:    s_subb_u32 s3, s3, s5
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v0, s2, s4, v0
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s5, 0, s2
+; GFX11-NEXT:    v_add_co_u32 v0, s2, s2, v0
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s3, 0, s2
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
 ; GFX11-NEXT:    s_endpgm
   %usub = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %a, i64 %b) #0
@@ -435,21 +440,23 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s11, 0xf000
 ; SI-NEXT:    s_mov_b32 s10, -1
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_sub_u32 s6, s4, s6
-; SI-NEXT:    v_mov_b32_e32 v0, s4
-; SI-NEXT:    s_subb_u32 s7, s5, s7
-; SI-NEXT:    v_mov_b32_e32 v1, s5
-; SI-NEXT:    v_cmp_gt_u64_e32 vcc, s[6:7], v[0:1]
-; SI-NEXT:    v_mov_b32_e32 v2, s6
+; SI-NEXT:    s_sub_u32 s4, s4, s6
+; SI-NEXT:    s_cselect_b64 s[12:13], -1, 0
+; SI-NEXT:    s_or_b32 s6, s12, s13
+; SI-NEXT:    s_cmp_lg_u32 s6, 0
+; SI-NEXT:    s_subb_u32 s5, s5, s7
 ; SI-NEXT:    s_mov_b32 s8, s0
 ; SI-NEXT:    s_mov_b32 s9, s1
+; SI-NEXT:    v_mov_b32_e32 v0, s4
+; SI-NEXT:    v_mov_b32_e32 v1, s5
+; SI-NEXT:    s_cselect_b64 s[4:5], -1, 0
 ; SI-NEXT:    s_mov_b32 s0, s2
 ; SI-NEXT:    s_mov_b32 s1, s3
 ; SI-NEXT:    s_mov_b32 s2, s10
 ; SI-NEXT:    s_mov_b32 s3, s11
-; SI-NEXT:    v_mov_b32_e32 v3, s7
-; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
 ; SI-NEXT:    buffer_store_byte v0, off, s[0:3], 0
 ; SI-NEXT:    s_endpgm
 ;
@@ -457,37 +464,37 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI:       ; %bb.0:
 ; VI-NEXT:    s_load_dwordx8 s[0:7], s[4:5], 0x24
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_sub_u32 s2, s4, s6
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
-; VI-NEXT:    s_sub_u32 s0, s4, s6
-; VI-NEXT:    v_mov_b32_e32 v4, s4
 ; VI-NEXT:    v_mov_b32_e32 v1, s1
-; VI-NEXT:    s_subb_u32 s1, s5, s7
-; VI-NEXT:    v_mov_b32_e32 v5, s5
-; VI-NEXT:    v_mov_b32_e32 v7, s1
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
-; VI-NEXT:    v_mov_b32_e32 v6, s0
-; VI-NEXT:    v_mov_b32_e32 v2, s2
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; VI-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; VI-NEXT:    s_subb_u32 s0, s5, s7
+; VI-NEXT:    v_mov_b32_e32 v4, s2
+; VI-NEXT:    v_mov_b32_e32 v5, s0
+; VI-NEXT:    s_cselect_b64 s[0:1], -1, 0
 ; VI-NEXT:    v_mov_b32_e32 v3, s3
-; VI-NEXT:    flat_store_dwordx2 v[0:1], v[6:7]
-; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
 ; VI-NEXT:    flat_store_byte v[2:3], v0
 ; VI-NEXT:    s_endpgm
 ;
 ; GFX9-LABEL: s_usubo_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT:    v_mov_b32_e32 v4, 0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_sub_u32 s0, s12, s14
-; GFX9-NEXT:    v_mov_b32_e32 v0, s12
-; GFX9-NEXT:    v_mov_b32_e32 v1, s13
-; GFX9-NEXT:    s_subb_u32 s1, s13, s15
-; GFX9-NEXT:    v_mov_b32_e32 v3, s1
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
-; GFX9-NEXT:    v_mov_b32_e32 v2, s0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX9-NEXT:    s_sub_u32 s2, s12, s14
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    s_cmp_lg_u64 s[0:1], 0
+; GFX9-NEXT:    s_subb_u32 s0, s13, s15
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s0
+; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[0:1]
+; GFX9-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
+; GFX9-NEXT:    global_store_byte v2, v3, s[10:11]
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: s_usubo_i64:
@@ -496,10 +503,12 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_sub_u32 s0, s12, s14
-; GFX10-NEXT:    s_subb_u32 s1, s13, s15
+; GFX10-NEXT:    s_cselect_b32 s1, -1, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, s0
+; GFX10-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX10-NEXT:    s_subb_u32 s1, s13, s15
+; GFX10-NEXT:    s_cselect_b32 s0, -1, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s1
-; GFX10-NEXT:    v_cmp_gt_u64_e64 s0, s[0:1], s[12:13]
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s0
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[8:9]
 ; GFX10-NEXT:    global_store_byte v2, v3, s[10:11]
@@ -509,12 +518,13 @@ define amdgpu_kernel void @s_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b256 s[0:7], s[4:5], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_sub_u32 s6, s4, s6
-; GFX11-NEXT:    s_subb_u32 s7, s5, s7
-; GFX11-NEXT:    v_mov_b32_e32 v0, s6
-; GFX11-NEXT:    v_cmp_gt_u64_e64 s4, s[6:7], s[4:5]
-; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    s_sub_u32 s4, s4, s6
+; GFX11-NEXT:    s_cselect_b32 s6, -1, 0
+; GFX11-NEXT:    v_mov_b32_e32 v0, s4
+; GFX11-NEXT:    s_cmp_lg_u32 s6, 0
+; GFX11-NEXT:    s_subb_u32 s5, s5, s7
+; GFX11-NEXT:    s_cselect_b32 s4, -1, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_mov_b32 v1, s5
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_store_b64 v2, v[0:1], s[0:1]
@@ -550,10 +560,10 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; SI-NEXT:    s_mov_b32 s4, s2
 ; SI-NEXT:    s_mov_b32 s5, s3
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_sub_i32_e32 v2, vcc, v0, v2
-; SI-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; SI-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; SI-NEXT:    buffer_store_dwordx2 v[2:3], off, s[8:11], 0
+; SI-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; SI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; SI-NEXT:    buffer_store_byte v0, off, s[4:7], 0
 ; SI-NEXT:    s_endpgm
@@ -573,10 +583,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; VI-NEXT:    v_mov_b32_e32 v6, s2
 ; VI-NEXT:    v_mov_b32_e32 v7, s3
 ; VI-NEXT:    s_waitcnt vmcnt(0)
-; VI-NEXT:    v_sub_u32_e32 v2, vcc, v0, v2
-; VI-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; VI-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; VI-NEXT:    flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
+; VI-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; VI-NEXT:    flat_store_dwordx2 v[4:5], v[0:1]
 ; VI-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; VI-NEXT:    flat_store_byte v[6:7], v0
 ; VI-NEXT:    s_endpgm
@@ -589,10 +598,9 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX9-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX9-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX9-NEXT:    s_waitcnt vmcnt(0)
-; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; GFX9-NEXT:    global_store_byte v4, v0, s[10:11]
 ; GFX9-NEXT:    s_endpgm
@@ -606,12 +614,11 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[12:13]
 ; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[14:15]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
-; GFX10-NEXT:    global_store_dwordx2 v4, v[2:3], s[8:9]
-; GFX10-NEXT:    global_store_byte v4, v0, s[10:11]
+; GFX10-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
+; GFX10-NEXT:    global_store_dwordx2 v4, v[0:1], s[8:9]
+; GFX10-NEXT:    global_store_byte v4, v2, s[10:11]
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_usubo_i64:
@@ -623,14 +630,12 @@ define amdgpu_kernel void @v_usubo_i64(ptr addrspace(1) %out, ptr addrspace(1) %
 ; GFX11-NEXT:    global_load_b64 v[0:1], v4, s[4:5]
 ; GFX11-NEXT:    global_load_b64 v[2:3], v4, s[6:7]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX11-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX11-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc_lo
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    global_store_b64 v4, v[2:3], s[0:1]
-; GFX11-NEXT:    global_store_b8 v4, v0, s[2:3]
+; GFX11-NEXT:    global_store_b64 v4, v[0:1], s[0:1]
+; GFX11-NEXT:    global_store_b8 v4, v2, s[2:3]
 ; GFX11-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/usubsat.ll b/llvm/test/CodeGen/AMDGPU/usubsat.ll
index 90491a07289a0..3ddb2f02c48fe 100644
--- a/llvm/test/CodeGen/AMDGPU/usubsat.ll
+++ b/llvm/test/CodeGen/AMDGPU/usubsat.ll
@@ -730,52 +730,38 @@ define i64 @v_usubsat_i64(i64 %lhs, i64 %rhs) {
 ; GFX6-LABEL: v_usubsat_i64:
 ; GFX6:       ; %bb.0:
 ; GFX6-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX6-NEXT:    v_sub_i32_e32 v2, vcc, v0, v2
-; GFX6-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX6-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX6-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX6-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX6-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX6-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX6-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX8-LABEL: v_usubsat_i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX8-NEXT:    v_sub_u32_e32 v2, vcc, v0, v2
-; GFX8-NEXT:    v_subb_u32_e32 v3, vcc, v1, v3, vcc
-; GFX8-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX8-NEXT:    v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX9-LABEL: v_usubsat_i64:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT:    v_sub_co_u32_e32 v2, vcc, v0, v2
-; GFX9-NEXT:    v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
-; GFX9-NEXT:    v_cmp_gt_u64_e32 vcc, v[2:3], v[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX9-NEXT:    v_sub_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT:    v_subb_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc
 ; GFX9-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-LABEL: v_usubsat_i64:
-; GFX10:       ; %bb.0:
-; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX10-NEXT:    v_sub_co_ci_u32_e32 v3, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX10-NEXT:    s_setpc_b64 s[30:31]
-;
-; GFX11-LABEL: v_usubsat_i64:
-; GFX11:       ; %bb.0:
-; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v0, v2
-; GFX11-NEXT:    v_sub_co_ci_u32_e64 v3, null, v1, v3, vcc_lo
-; GFX11-NEXT:    v_cmp_gt_u64_e32 vcc_lo, v[2:3], v[0:1]
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v2, 0, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v3, 0, vcc_lo
-; GFX11-NEXT:    s_setpc_b64 s[30:31]
+; GFX10PLUS-LABEL: v_usubsat_i64:
+; GFX10PLUS:       ; %bb.0:
+; GFX10PLUS-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10PLUS-NEXT:    v_sub_co_u32 v0, vcc_lo, v0, v2
+; GFX10PLUS-NEXT:    v_sub_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v0, v0, 0, vcc_lo
+; GFX10PLUS-NEXT:    v_cndmask_b32_e64 v1, v1, 0, vcc_lo
+; GFX10PLUS-NEXT:    s_setpc_b64 s[30:31]
   %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
   ret i64 %result
 }

From a9ced610d87b22276812053a357f57c7e11b5e42 Mon Sep 17 00:00:00 2001
From: John Lu <John.Lu at amd.com>
Date: Thu, 2 Oct 2025 10:11:24 -0500
Subject: [PATCH 18/18] Address feedback

Signed-off-by: John Lu <John.Lu at amd.com>
---
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 4c60bc95e2c06..0ba231aa40c28 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6129,7 +6129,7 @@ SITargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
           .addImm(0);
     }
 
-    unsigned Opc = (MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
+    unsigned Opc = MI.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO
                        ? AMDGPU::S_ADDC_U32
                        : AMDGPU::S_SUBB_U32;
 
@@ -16591,26 +16591,24 @@ SDValue SITargetLowering::performSetCCCombine(SDNode *N,
                           sd_match(LHS, m_Sub(m_Specific(RHS), m_Value()))) ||
                          (CC == ISD::SETEQ && CRHS && CRHS->isZero() &&
                           sd_match(LHS, m_Add(m_Value(), m_One()))))) {
-    EVT TargetType = MVT::i32;
-    EVT CarryVT = MVT::i1;
     bool IsAdd = LHS.getOpcode() == ISD::ADD;
 
     SDValue Op0 = LHS.getOperand(0);
     SDValue Op1 = LHS.getOperand(1);
 
-    SDValue Op0Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op0);
-    SDValue Op1Lo = DAG.getNode(ISD::TRUNCATE, SL, TargetType, Op1);
+    SDValue Op0Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Op0);
+    SDValue Op1Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Op1);
 
     SDValue Op0Hi = getHiHalf64(Op0, DAG);
     SDValue Op1Hi = getHiHalf64(Op1, DAG);
 
     SDValue NodeLo =
         DAG.getNode(IsAdd ? ISD::UADDO : ISD::USUBO, SL,
-                    DAG.getVTList(TargetType, CarryVT), {Op0Lo, Op1Lo});
+                    DAG.getVTList(MVT::i32, MVT::i1), {Op0Lo, Op1Lo});
 
     SDValue CarryInHi = NodeLo.getValue(1);
     SDValue NodeHi = DAG.getNode(IsAdd ? ISD::UADDO_CARRY : ISD::USUBO_CARRY,
-                                 SL, DAG.getVTList(TargetType, CarryVT),
+                                 SL, DAG.getVTList(MVT::i32, MVT::i1),
                                  {Op0Hi, Op1Hi, CarryInHi});
 
     SDValue ResultLo = NodeLo.getValue(0);
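
For readers skimming the test churn above: the lowering now reuses the carry-out already produced by the low/high add or subtract instead of re-deriving it with a wide compare, which is why the v_cmp_gt_u64 instructions disappear from the checks and s_cselect_b64/s_cselect_b32 of SCC appear in their place. As a rough standalone model of the 32-bit carry chain that NodeLo/NodeHi build in performSetCCCombine, here is a plain C++ sketch; uaddo and uaddoCarry are illustrative stand-ins for ISD::UADDO and ISD::UADDO_CARRY, not LLVM APIs, and the USUBO path is analogous.

#include <cstdint>
#include <cstdio>

struct SumCarry {
  uint32_t Sum;
  bool Carry; // models the MVT::i1 carry-out value
};

// Models ISD::UADDO: 32-bit add that reports unsigned overflow.
static SumCarry uaddo(uint32_t A, uint32_t B) {
  uint32_t S = A + B;
  return {S, S < A};
}

// Models ISD::UADDO_CARRY: 32-bit add with carry-in, reporting carry-out.
static SumCarry uaddoCarry(uint32_t A, uint32_t B, bool CarryIn) {
  SumCarry Lo = uaddo(A, B);
  uint32_t S = Lo.Sum + CarryIn;
  return {S, Lo.Carry || (CarryIn && S == 0)};
}

int main() {
  uint64_t A = 0xffffffff00000001ULL, B = 0x00000000ffffffffULL;
  // Lo half uses UADDO; Hi half uses UADDO_CARRY consuming the lo carry-out,
  // mirroring NodeLo and NodeHi in the combine above.
  SumCarry Lo = uaddo(uint32_t(A), uint32_t(B));
  SumCarry Hi = uaddoCarry(uint32_t(A >> 32), uint32_t(B >> 32), Lo.Carry);
  uint64_t Sum = (uint64_t(Hi.Sum) << 32) | Lo.Sum;
  printf("sum=0x%016llx carry=%d\n", (unsigned long long)Sum, Hi.Carry);
  return Sum == A + B ? 0 : 1; // the split must agree with a native 64-bit add
}

The final carry-out (Hi.Carry here) is the i1 that the old code recomputed with a 64-bit unsigned compare; in the SALU sequences above it surfaces as the SCC result of s_addc_u32/s_subb_u32, captured by the following s_cselect.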


