[llvm] fc672b6 - [AMDGPU] Improved wide multiplies

Jessica Del via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 22 07:40:44 PST 2023


Author: Jessica Del
Date: 2023-02-22T16:39:06+01:00
New Revision: fc672b6a8b48195b7afaba6f0f9d64e579b1dc72

URL: https://github.com/llvm/llvm-project/commit/fc672b6a8b48195b7afaba6f0f9d64e579b1dc72
DIFF: https://github.com/llvm/llvm-project/commit/fc672b6a8b48195b7afaba6f0f9d64e579b1dc72.diff

LOG: [AMDGPU] Improved wide multiplies

These changes use known-bits analysis in the legalizer to emit optimized
instructions when an operand is known to be (partially) zero.

Change-Id: Ie2f6d0d3ee9d5b279d1f4c1dd0787492e39cc77a

Differential Revision: https://reviews.llvm.org/D140208

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
    llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
    llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
    llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
    llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.mir
    llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
    llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h b/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
index 7884b3f2ea6e7..9f9e435b9ce27 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Legalizer.h
@@ -22,6 +22,7 @@
 
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/StringRef.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineFunctionPass.h"
 
@@ -75,7 +76,7 @@ class Legalizer : public MachineFunctionPass {
   legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
                           ArrayRef<GISelChangeObserver *> AuxObservers,
                           LostDebugLocObserver &LocObserver,
-                          MachineIRBuilder &MIRBuilder);
+                          MachineIRBuilder &MIRBuilder, GISelKnownBits *KB);
 };
 } // End namespace llvm.
 

diff  --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index f57d2b68c86dc..43c123d1a6666 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -21,6 +21,7 @@
 #define LLVM_CODEGEN_GLOBALISEL_LEGALIZERHELPER_H
 
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/RuntimeLibcalls.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 
@@ -56,6 +57,7 @@ class LegalizerHelper {
   MachineRegisterInfo &MRI;
   const LegalizerInfo &LI;
   const TargetLowering &TLI;
+  GISelKnownBits *KB;
 
 public:
   enum LegalizeResult {
@@ -74,11 +76,13 @@ class LegalizerHelper {
   /// Expose LegalizerInfo so the clients can re-use.
   const LegalizerInfo &getLegalizerInfo() const { return LI; }
   const TargetLowering &getTargetLowering() const { return TLI; }
+  GISelKnownBits *getKnownBits() const { return KB; }
 
   LegalizerHelper(MachineFunction &MF, GISelChangeObserver &Observer,
                   MachineIRBuilder &B);
   LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
-                  GISelChangeObserver &Observer, MachineIRBuilder &B);
+                  GISelChangeObserver &Observer, MachineIRBuilder &B,
+                  GISelKnownBits *KB = nullptr);
 
   /// Replace \p MI by a sequence of legal instructions that can implement the
   /// same operation. Note that this means \p MI may be deleted, so any iterator

diff  --git a/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
index 1a13f39c100c9..ee6aaa1a1a187 100644
--- a/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Legalizer.cpp
@@ -18,6 +18,7 @@
 #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/GlobalISel/GISelWorkList.h"
 #include "llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
@@ -75,6 +76,7 @@ INITIALIZE_PASS_BEGIN(Legalizer, DEBUG_TYPE,
                       false)
 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(GISelKnownBitsAnalysis)
 INITIALIZE_PASS_END(Legalizer, DEBUG_TYPE,
                     "Legalize the Machine IR a function's Machine IR", false,
                     false)
@@ -85,6 +87,8 @@ void Legalizer::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<TargetPassConfig>();
   AU.addRequired<GISelCSEAnalysisWrapperPass>();
   AU.addPreserved<GISelCSEAnalysisWrapperPass>();
+  AU.addRequired<GISelKnownBitsAnalysis>();
+  AU.addPreserved<GISelKnownBitsAnalysis>();
   getSelectionDAGFallbackAnalysisUsage(AU);
   MachineFunctionPass::getAnalysisUsage(AU);
 }
@@ -173,7 +177,8 @@ Legalizer::MFResult
 Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
                                    ArrayRef<GISelChangeObserver *> AuxObservers,
                                    LostDebugLocObserver &LocObserver,
-                                   MachineIRBuilder &MIRBuilder) {
+                                   MachineIRBuilder &MIRBuilder,
+                                   GISelKnownBits *KB) {
   MIRBuilder.setMF(MF);
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
@@ -212,7 +217,7 @@ Legalizer::legalizeMachineFunction(MachineFunction &MF, const LegalizerInfo &LI,
   // Now install the observer as the delegate to MF.
   // This will keep all the observers notified about new insertions/deletions.
   RAIIMFObsDelInstaller Installer(MF, WrapperObserver);
-  LegalizerHelper Helper(MF, LI, WrapperObserver, MIRBuilder);
+  LegalizerHelper Helper(MF, LI, WrapperObserver, MIRBuilder, KB);
   LegalizationArtifactCombiner ArtCombiner(MIRBuilder, MRI, LI);
   bool Changed = false;
   SmallVector<MachineInstr *, 128> RetryList;
@@ -338,9 +343,12 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
   if (VerifyDebugLocs > DebugLocVerifyLevel::None)
     AuxObservers.push_back(&LocObserver);
 
+  // This allows Known Bits Analysis in the legalizer.
+  GISelKnownBits *KB = &getAnalysis<GISelKnownBitsAnalysis>().get(MF);
+
   const LegalizerInfo &LI = *MF.getSubtarget().getLegalizerInfo();
-  MFResult Result =
-      legalizeMachineFunction(MF, LI, AuxObservers, LocObserver, *MIRBuilder);
+  MFResult Result = legalizeMachineFunction(MF, LI, AuxObservers, LocObserver,
+                                            *MIRBuilder, KB);
 
   if (Result.FailedOn) {
     reportGISelFailure(MF, TPC, MORE, "gisel-legalize",

diff  --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 6d579c5475d85..7aa868ab97480 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -15,6 +15,7 @@
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
 #include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
@@ -103,13 +104,13 @@ LegalizerHelper::LegalizerHelper(MachineFunction &MF,
                                  MachineIRBuilder &Builder)
     : MIRBuilder(Builder), Observer(Observer), MRI(MF.getRegInfo()),
       LI(*MF.getSubtarget().getLegalizerInfo()),
-      TLI(*MF.getSubtarget().getTargetLowering()) { }
+      TLI(*MF.getSubtarget().getTargetLowering()), KB(nullptr) {}
 
 LegalizerHelper::LegalizerHelper(MachineFunction &MF, const LegalizerInfo &LI,
                                  GISelChangeObserver &Observer,
-                                 MachineIRBuilder &B)
-  : MIRBuilder(B), Observer(Observer), MRI(MF.getRegInfo()), LI(LI),
-    TLI(*MF.getSubtarget().getTargetLowering()) { }
+                                 MachineIRBuilder &B, GISelKnownBits *KB)
+    : MIRBuilder(B), Observer(Observer), MRI(MF.getRegInfo()), LI(LI),
+      TLI(*MF.getSubtarget().getTargetLowering()), KB(KB) {}
 
 LegalizerHelper::LegalizeResult
 LegalizerHelper::legalizeInstrStep(MachineInstr &MI,

diff  --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 2c27b267d1b89..52f062927e11d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2898,15 +2898,18 @@ bool AMDGPULegalizerInfo::legalizeBuildVector(
 // the outer loop going over parts of the result, the outer loop should go
 // over parts of one of the factors. This should result in instruction
 // selection that makes full use of S_ADDC_U32 instructions.
-void AMDGPULegalizerInfo::buildMultiply(
-    LegalizerHelper &Helper, MutableArrayRef<Register> Accum,
-    ArrayRef<Register> Src0, ArrayRef<Register> Src1,
-    bool UsePartialMad64_32, bool SeparateOddAlignedProducts) const {
+void AMDGPULegalizerInfo::buildMultiply(LegalizerHelper &Helper,
+                                        MutableArrayRef<Register> Accum,
+                                        ArrayRef<Register> Src0,
+                                        ArrayRef<Register> Src1,
+                                        bool UsePartialMad64_32,
+                                        bool SeparateOddAlignedProducts) const {
   // Use (possibly empty) vectors of S1 registers to represent the set of
   // carries from one pair of positions to the next.
   using Carry = SmallVector<Register, 2>;
 
   MachineIRBuilder &B = Helper.MIRBuilder;
+  GISelKnownBits &KB = *Helper.getKnownBits();
 
   const LLT S1 = LLT::scalar(1);
   const LLT S32 = LLT::scalar(32);
@@ -2926,6 +2929,12 @@ void AMDGPULegalizerInfo::buildMultiply(
     return Zero64;
   };
 
+  SmallVector<bool, 2> Src0KnownZeros, Src1KnownZeros;
+  for (unsigned i = 0; i < Src0.size(); ++i) {
+    Src0KnownZeros.push_back(KB.getKnownBits(Src0[i]).isZero());
+    Src1KnownZeros.push_back(KB.getKnownBits(Src1[i]).isZero());
+  }
+
   // Merge the given carries into the 32-bit LocalAccum, which is modified
   // in-place.
   //
@@ -2988,9 +2997,14 @@ void AMDGPULegalizerInfo::buildMultiply(
         if (LocalAccum.size() == 1 &&
             (!UsePartialMad64_32 || !CarryIn.empty())) {
           do {
+            // Skip multiplication if one of the operands is 0
             unsigned j1 = DstIndex - j0;
+            if (Src0KnownZeros[j0] || Src1KnownZeros[j1]) {
+              ++j0;
+              continue;
+            }
             auto Mul = B.buildMul(S32, Src0[j0], Src1[j1]);
-            if (!LocalAccum[0]) {
+            if (!LocalAccum[0] || KB.getKnownBits(LocalAccum[0]).isZero()) {
               LocalAccum[0] = Mul.getReg(0);
             } else {
               if (CarryIn.empty()) {
@@ -3030,12 +3044,17 @@ void AMDGPULegalizerInfo::buildMultiply(
 
           do {
             unsigned j1 = DstIndex - j0;
+            if (Src0KnownZeros[j0] || Src1KnownZeros[j1]) {
+              ++j0;
+              continue;
+            }
             auto Mad = B.buildInstr(AMDGPU::G_AMDGPU_MAD_U64_U32, {S64, S1},
                                     {Src0[j0], Src1[j1], Tmp});
             Tmp = Mad.getReg(0);
             if (!HaveSmallAccum)
               CarryOut.push_back(Mad.getReg(1));
             HaveSmallAccum = false;
+
             ++j0;
           } while (j0 <= DstIndex);
 
@@ -3178,7 +3197,6 @@ bool AMDGPULegalizerInfo::legalizeMul(LegalizerHelper &Helper,
   B.buildMergeLikeInstr(DstReg, AccumRegs);
   MI.eraseFromParent();
   return true;
-
 }
 
 // Legalize ctlz/cttz to ffbh/ffbl instead of the default legalization to

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
index f0864f595dc5f..5ef8712b82c40 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.ll
@@ -4,8 +4,8 @@
 declare i32 @llvm.amdgcn.workitem.id.x()
 
 ; A 64-bit multiplication where no arguments were zero extended.
-define amdgpu_kernel void @v_mul_i64_zext_00(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) nounwind {
-; GFX10-LABEL: v_mul_i64_zext_00:
+define amdgpu_kernel void @v_mul_i64_no_zext(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) nounwind {
+; GFX10-LABEL: v_mul_i64_no_zext:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
@@ -21,7 +21,7 @@ define amdgpu_kernel void @v_mul_i64_zext_00(ptr addrspace(1) %out, ptr addrspac
 ; GFX10-NEXT:    global_store_dwordx2 v6, v[4:5], s[2:3]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_zext_00:
+; GFX11-LABEL: v_mul_i64_no_zext:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b128 s[0:3], s[0:1], 0x2c
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v6, 3, v0
@@ -50,8 +50,8 @@ define amdgpu_kernel void @v_mul_i64_zext_00(ptr addrspace(1) %out, ptr addrspac
 }
 
 ; a 64 bit multiplication where the second argument was zero extended.
-define amdgpu_kernel void @v_mul_i64_zext_01(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: v_mul_i64_zext_01:
+define amdgpu_kernel void @v_mul_i64_zext_src1(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul_i64_zext_src1:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
@@ -69,7 +69,7 @@ define amdgpu_kernel void @v_mul_i64_zext_01(ptr addrspace(1) %out, ptr addrspac
 ; GFX10-NEXT:    global_store_dwordx2 v0, v[2:3], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_zext_01:
+; GFX11-LABEL: v_mul_i64_zext_src1:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
@@ -99,8 +99,8 @@ define amdgpu_kernel void @v_mul_i64_zext_01(ptr addrspace(1) %out, ptr addrspac
 }
 
 ; 64 bit multiplication where the first argument was zero extended.
-define amdgpu_kernel void @v_mul_i64_zext_10(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: v_mul_i64_zext_10:
+define amdgpu_kernel void @v_mul_i64_zext_src0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul_i64_zext_src0:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
@@ -118,7 +118,7 @@ define amdgpu_kernel void @v_mul_i64_zext_10(ptr addrspace(1) %out, ptr addrspac
 ; GFX10-NEXT:    global_store_dwordx2 v0, v[2:3], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_zext_10:
+; GFX11-LABEL: v_mul_i64_zext_src0:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
@@ -148,8 +148,8 @@ define amdgpu_kernel void @v_mul_i64_zext_10(ptr addrspace(1) %out, ptr addrspac
 }
 
 ; 64-bit multiplication where both arguments were zero extended.
-define amdgpu_kernel void @v_mul_i64_zext_11(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: v_mul_i64_zext_11:
+define amdgpu_kernel void @v_mul_i64_zext_src0_src1(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul_i64_zext_src0_src1:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
@@ -165,7 +165,7 @@ define amdgpu_kernel void @v_mul_i64_zext_11(ptr addrspace(1) %out, ptr addrspac
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_zext_11:
+; GFX11-LABEL: v_mul_i64_zext_src0_src1:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
@@ -194,8 +194,8 @@ define amdgpu_kernel void @v_mul_i64_zext_11(ptr addrspace(1) %out, ptr addrspac
 }
 
 ; 64-bit multiplication where the upper bytes of the first argument were masked.
-define amdgpu_kernel void @v_mul_i64_and_a_hi(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: v_mul_i64_and_a_hi:
+define amdgpu_kernel void @v_mul_i64_masked_src0_hi(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul_i64_masked_src0_hi:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
@@ -213,7 +213,7 @@ define amdgpu_kernel void @v_mul_i64_and_a_hi(ptr addrspace(1) %out, ptr addrspa
 ; GFX10-NEXT:    global_store_dwordx2 v0, v[2:3], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_and_a_hi:
+; GFX11-LABEL: v_mul_i64_masked_src0_hi:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
@@ -243,46 +243,39 @@ define amdgpu_kernel void @v_mul_i64_and_a_hi(ptr addrspace(1) %out, ptr addrspa
 }
 
 ; 64-bit multiplication where lower bytes of first argument were masked.
-define amdgpu_kernel void @v_mul_i64_and_a_lo(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: v_mul_i64_and_a_lo:
+define amdgpu_kernel void @v_mul_i64_masked_src0_lo(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul_i64_masked_src0_lo:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 3, v0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[2:3]
-; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[6:7]
+; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[6:7]
+; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(1)
-; GFX10-NEXT:    v_mad_u64_u32 v[4:5], s0, 0, v0, 0
-; GFX10-NEXT:    v_mul_lo_u32 v1, 0, v1
-; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mul_lo_u32 v0, v3, v0
-; GFX10-NEXT:    v_add3_u32 v5, v5, v1, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    global_store_dwordx2 v0, v[4:5], s[4:5]
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX10-NEXT:    global_store_dwordx2 v0, v[0:1], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_and_a_lo:
+; GFX11-LABEL: v_mul_i64_masked_src0_lo:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    s_load_b64 s[4:5], s[0:1], 0x34
-; GFX11-NEXT:    s_load_b128 s[0:3], s[0:1], 0x24
+; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
+; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x34
 ; GFX11-NEXT:    v_lshlrev_b32_e32 v2, 3, v0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    global_load_b64 v[0:1], v2, s[4:5]
-; GFX11-NEXT:    global_load_b64 v[2:3], v2, s[2:3]
+; GFX11-NEXT:    global_load_b64 v[0:1], v2, s[6:7]
+; GFX11-NEXT:    global_load_b64 v[2:3], v2, s[0:1]
 ; GFX11-NEXT:    s_waitcnt vmcnt(1)
-; GFX11-NEXT:    v_mad_u64_u32 v[4:5], null, 0, v0, 0
-; GFX11-NEXT:    v_mul_lo_u32 v1, 0, v1
-; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_mul_lo_u32 v0, v3, v0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_add3_u32 v5, v5, v1, v0
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
-; GFX11-NEXT:    global_store_b64 v0, v[4:5], s[0:1]
+; GFX11-NEXT:    s_waitcnt vmcnt(0)
+; GFX11-NEXT:    v_mul_lo_u32 v1, v1, v2
+; GFX11-NEXT:    global_store_b64 v0, v[0:1], s[4:5]
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -297,28 +290,27 @@ define amdgpu_kernel void @v_mul_i64_and_a_lo(ptr addrspace(1) %out, ptr addrspa
 }
 
 ; 64-bit multiplication where the lower bytes of the second argument were masked.
-define amdgpu_kernel void @v_mul_i64_and_b_lo(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: v_mul_i64_and_b_lo:
+define amdgpu_kernel void @v_mul_i64_masked_src1_lo(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul_i64_masked_src1_lo:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 3, v0
+; GFX10-NEXT:    v_lshlrev_b32_e32 v3, 3, v0
+; GFX10-NEXT:    ; kill: killed $vgpr3
+; GFX10-NEXT:    ; kill: killed $sgpr6_sgpr7
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[6:7]
-; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[2:3]
-; GFX10-NEXT:    s_waitcnt vmcnt(1)
-; GFX10-NEXT:    v_mad_u64_u32 v[4:5], s0, v0, 0, 0
+; GFX10-NEXT:    global_load_dwordx2 v[0:1], v3, s[6:7]
+; GFX10-NEXT:    global_load_dwordx2 v[1:2], v3, s[2:3]
+; GFX10-NEXT:    ; kill: killed $sgpr2_sgpr3
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mul_lo_u32 v0, v0, v3
-; GFX10-NEXT:    v_mul_lo_u32 v1, v1, 0
-; GFX10-NEXT:    v_add3_u32 v5, v5, v0, v1
+; GFX10-NEXT:    v_mul_lo_u32 v1, v0, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    global_store_dwordx2 v0, v[4:5], s[4:5]
+; GFX10-NEXT:    global_store_dwordx2 v0, v[0:1], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_and_b_lo:
+; GFX11-LABEL: v_mul_i64_masked_src1_lo:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
@@ -327,16 +319,11 @@ define amdgpu_kernel void @v_mul_i64_and_b_lo(ptr addrspace(1) %out, ptr addrspa
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_load_b64 v[0:1], v2, s[6:7]
-; GFX11-NEXT:    global_load_b64 v[2:3], v2, s[0:1]
-; GFX11-NEXT:    s_waitcnt vmcnt(1)
-; GFX11-NEXT:    v_mad_u64_u32 v[4:5], null, v0, 0, 0
+; GFX11-NEXT:    global_load_b64 v[1:2], v2, s[0:1]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_mul_lo_u32 v0, v0, v3
-; GFX11-NEXT:    v_mul_lo_u32 v1, v1, 0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_add3_u32 v5, v5, v0, v1
+; GFX11-NEXT:    v_mul_lo_u32 v1, v0, v2
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
-; GFX11-NEXT:    global_store_b64 v0, v[4:5], s[4:5]
+; GFX11-NEXT:    global_store_b64 v0, v[0:1], s[4:5]
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -351,8 +338,8 @@ define amdgpu_kernel void @v_mul_i64_and_b_lo(ptr addrspace(1) %out, ptr addrspa
 }
 
 ; 64-bit multiplication where the entire first argument is masked.
-define amdgpu_kernel void @v_mul_i64_and_hilo(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: v_mul_i64_and_hilo:
+define amdgpu_kernel void @v_mul_i64_masked_src0(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul_i64_masked_src0:
 ; GFX10:       ; %bb.0:
 ; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
@@ -362,7 +349,7 @@ define amdgpu_kernel void @v_mul_i64_and_hilo(ptr addrspace(1) %out, ptr addrspa
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: v_mul_i64_and_hilo:
+; GFX11-LABEL: v_mul_i64_masked_src0:
 ; GFX11:       ; %bb.0:
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
@@ -442,8 +429,8 @@ define amdgpu_kernel void @v_mul_i64_partially_masked_src0(ptr addrspace(1) %out
 }
 
 ; 64-bit multiplication, where the first argument is masked before a branch
-define amdgpu_kernel void @mul64_and_in_branch(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: mul64_and_in_branch:
+define amdgpu_kernel void @v_mul64_masked_before_branch(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul64_masked_before_branch:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
@@ -453,7 +440,7 @@ define amdgpu_kernel void @mul64_and_in_branch(ptr addrspace(1) %out, ptr addrsp
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[0:1]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: mul64_and_in_branch:
+; GFX11-LABEL: v_mul64_masked_before_branch:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
@@ -487,44 +474,40 @@ endif:
   ret void
 }
 
-; 64-bit multiplication with both arguments changed in differnt basic blocks.
-define amdgpu_kernel void @mul64_and_in_branch_2(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
-; GFX10-LABEL: mul64_and_in_branch_2:
+; 64-bit multiplication with both arguments changed in different basic blocks.
+define amdgpu_kernel void @v_mul64_masked_before_and_in_branch(ptr addrspace(1) %out, ptr addrspace(1) %aptr, ptr addrspace(1) %bptr) {
+; GFX10-LABEL: v_mul64_masked_before_and_in_branch:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_clause 0x1
 ; GFX10-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
 ; GFX10-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x34
-; GFX10-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
+; GFX10-NEXT:    v_lshlrev_b32_e32 v4, 3, v0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    global_load_dwordx2 v[2:3], v0, s[6:7]
-; GFX10-NEXT:    global_load_dwordx2 v[4:5], v0, s[2:3]
-; GFX10-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; GFX10-NEXT:    global_load_dwordx2 v[2:3], v4, s[6:7]
+; GFX10-NEXT:    global_load_dwordx2 v[0:1], v4, s[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(1)
 ; GFX10-NEXT:    v_cmp_ge_u64_e32 vcc_lo, 0, v[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mul_lo_u32 v5, v2, v5
+; GFX10-NEXT:    v_mul_lo_u32 v1, v2, v1
 ; GFX10-NEXT:    s_and_saveexec_b32 s0, vcc_lo
 ; GFX10-NEXT:    s_xor_b32 s0, exec_lo, s0
 ; GFX10-NEXT:  ; %bb.1: ; %else
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s1, v2, v4, 0
-; GFX10-NEXT:    v_mul_lo_u32 v2, 0, v4
-; GFX10-NEXT:    v_add3_u32 v1, v1, v5, v2
-; GFX10-NEXT:    ; implicit-def: $vgpr2_vgpr3
-; GFX10-NEXT:    ; implicit-def: $vgpr5
+; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s1, v2, v0, 0
+; GFX10-NEXT:    v_add_nc_u32_e32 v3, v3, v1
+; GFX10-NEXT:    v_mov_b32_e32 v0, v2
+; GFX10-NEXT:    v_mov_b32_e32 v1, v3
 ; GFX10-NEXT:  ; %bb.2: ; %Flow
 ; GFX10-NEXT:    s_andn2_saveexec_b32 s0, s0
 ; GFX10-NEXT:  ; %bb.3: ; %if
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s1, v2, 0, 0
-; GFX10-NEXT:    v_mul_lo_u32 v2, 0, 0
-; GFX10-NEXT:    v_add3_u32 v1, v1, v5, v2
+; GFX10-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-NEXT:  ; %bb.4: ; %endif
 ; GFX10-NEXT:    s_or_b32 exec_lo, exec_lo, s0
 ; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    global_store_dwordx2 v2, v[0:1], s[4:5]
 ; GFX10-NEXT:    s_endpgm
 ;
-; GFX11-LABEL: mul64_and_in_branch_2:
+; GFX11-LABEL: v_mul64_masked_before_and_in_branch:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    s_load_b128 s[4:7], s[0:1], 0x24
@@ -533,27 +516,21 @@ define amdgpu_kernel void @mul64_and_in_branch_2(ptr addrspace(1) %out, ptr addr
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_load_b64 v[2:3], v0, s[6:7]
-; GFX11-NEXT:    global_load_b64 v[4:5], v0, s[0:1]
+; GFX11-NEXT:    global_load_b64 v[0:1], v0, s[0:1]
 ; GFX11-NEXT:    s_mov_b32 s0, exec_lo
-; GFX11-NEXT:    ; implicit-def: $vgpr0_vgpr1
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
-; GFX11-NEXT:    v_mul_lo_u32 v5, v2, v5
+; GFX11-NEXT:    v_mul_lo_u32 v1, v2, v1
 ; GFX11-NEXT:    v_cmpx_ge_u64_e32 0, v[2:3]
 ; GFX11-NEXT:    s_xor_b32 s0, exec_lo, s0
 ; GFX11-NEXT:  ; %bb.1: ; %else
-; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v2, v4, 0
-; GFX11-NEXT:    v_mul_lo_u32 v2, 0, v4
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_add3_u32 v1, v1, v5, v2
-; GFX11-NEXT:    ; implicit-def: $vgpr2_vgpr3
-; GFX11-NEXT:    ; implicit-def: $vgpr5
+; GFX11-NEXT:    v_mad_u64_u32 v[2:3], null, v2, v0, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_dual_mov_b32 v0, v2 :: v_dual_add_nc_u32 v3, v3, v1
+; GFX11-NEXT:    v_mov_b32_e32 v1, v3
 ; GFX11-NEXT:  ; %bb.2: ; %Flow
 ; GFX11-NEXT:    s_and_not1_saveexec_b32 s0, s0
 ; GFX11-NEXT:  ; %bb.3: ; %if
-; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v2, 0, 0
-; GFX11-NEXT:    v_mul_lo_u32 v2, 0, 0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_add3_u32 v1, v1, v5, v2
+; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:  ; %bb.4: ; %endif
 ; GFX11-NEXT:    s_or_b32 exec_lo, exec_lo, s0
 ; GFX11-NEXT:    v_mov_b32_e32 v2, 0

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.mir
index 544a845b88ee9..5cf1779356517 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/mul-known-bits.i64.mir
@@ -46,11 +46,9 @@ body:             |
     ; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
     ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
-    ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
+    ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
     ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD]](s32)
     ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s32) = COPY $vgpr2
@@ -78,9 +76,7 @@ body:             |
     ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
     ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
     ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD]](s32)
     ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s32) = COPY $vgpr0
     %1:_(s64) = COPY $vgpr2_vgpr3
@@ -129,9 +125,7 @@ body:             |
     ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
     ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
     ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD]](s32)
     ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = COPY $vgpr2_vgpr3
@@ -156,13 +150,9 @@ body:             |
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
     ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
     ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C1]]
-    ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
-    ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
-    ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+    ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
+    ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
+    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[MUL]](s32)
     ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = COPY $vgpr2_vgpr3
@@ -188,13 +178,9 @@ body:             |
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
     ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C1]]
-    ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+    ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
     ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
-    ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[MUL]](s32)
     ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = COPY $vgpr2_vgpr3
@@ -213,20 +199,8 @@ body:             |
     ; GFX10-LABEL: name: v_mul_i64_masked_src0
     ; GFX10: liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
     ; GFX10-NEXT: {{  $}}
-    ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
-    ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
     ; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[COPY]], [[C]]
-    ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AND]](s64)
-    ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
-    ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C]]
-    ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
-    ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
-    ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
-    ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
+    ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[C]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = COPY $vgpr2_vgpr3
     %2:_(s64) = G_CONSTANT i64 0
@@ -279,13 +253,9 @@ body:             |
     ; GFX10-NEXT: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
     ; GFX10-NEXT: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C]](s64)
     ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C1]]
-    ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
+    ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C1]](s64)
     ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
-    ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[MUL]](s32)
     ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = G_CONSTANT i64 -4294967296
@@ -308,11 +278,9 @@ body:             |
     ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; GFX10-NEXT: [[AMDGPU_MAD_U64_U32_:%[0-9]+]]:_(s64), [[AMDGPU_MAD_U64_U32_1:%[0-9]+]]:_(s1) = G_AMDGPU_MAD_U64_U32 [[UV]](s32), [[UV2]], [[C1]]
     ; GFX10-NEXT: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[AMDGPU_MAD_U64_U32_]](s64)
-    ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV]], [[UV3]]
+    ; GFX10-NEXT: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
     ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV5]], [[MUL]]
-    ; GFX10-NEXT: [[MUL1:%[0-9]+]]:_(s32) = G_MUL [[UV1]], [[UV2]]
-    ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[ADD]], [[MUL1]]
-    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD1]](s32)
+    ; GFX10-NEXT: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UV4]](s32), [[ADD]](s32)
     ; GFX10-NEXT: $vgpr0_vgpr1 = COPY [[MV]](s64)
     %0:_(s64) = COPY $vgpr0_vgpr1
     %1:_(s64) = G_CONSTANT i64 4294967295

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 673dda8f59ee8..648e5214e2b1c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -1036,89 +1036,88 @@ define i64 @v_sdiv_i64_pow2k_denom(i64 %num) {
 ; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, v6, v3, vcc
 ; CHECK-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s6, v5, 0
 ; CHECK-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v0, v7
 ; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], s6, v6, v[3:4]
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT:    v_xor_b32_e32 v4, v0, v7
-; CHECK-NEXT:    v_mul_lo_u32 v0, v6, v2
-; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v3
-; CHECK-NEXT:    v_xor_b32_e32 v9, v1, v7
-; CHECK-NEXT:    v_mul_hi_u32 v1, v5, v2
+; CHECK-NEXT:    v_addc_u32_e32 v9, vcc, v1, v7, vcc
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v5, v[3:4]
+; CHECK-NEXT:    v_xor_b32_e32 v3, v8, v7
+; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v2
+; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v0
+; CHECK-NEXT:    v_xor_b32_e32 v4, v9, v7
+; CHECK-NEXT:    v_mul_hi_u32 v9, v5, v2
 ; CHECK-NEXT:    v_mul_hi_u32 v2, v6, v2
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v8, v0
-; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
-; CHECK-NEXT:    v_mul_hi_u32 v3, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v9
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v9, v6, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v9, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
+; CHECK-NEXT:    v_mul_hi_u32 v0, v6, v0
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v2, v9, v0
-; CHECK-NEXT:    v_mul_lo_u32 v3, v4, v1
-; CHECK-NEXT:    v_mul_hi_u32 v5, v4, v0
-; CHECK-NEXT:    v_mul_hi_u32 v0, v9, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v8, v2
+; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
+; CHECK-NEXT:    v_addc_u32_e32 v0, vcc, v6, v0, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v2, v4, v1
+; CHECK-NEXT:    v_mul_lo_u32 v5, v3, v0
+; CHECK-NEXT:    v_mul_hi_u32 v8, v3, v1
+; CHECK-NEXT:    v_mul_hi_u32 v1, v4, v1
 ; CHECK-NEXT:    s_movk_i32 s6, 0x1000
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v5, v9, v1
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_mul_hi_u32 v3, v4, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
+; CHECK-NEXT:    v_mul_lo_u32 v8, v4, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_mul_hi_u32 v5, v3, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
 ; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v0, v2
-; CHECK-NEXT:    v_mul_hi_u32 v6, v9, v1
-; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v5, 0
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v1, v2
+; CHECK-NEXT:    v_mul_hi_u32 v9, v4, v0
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v8, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_add_i32_e32 v3, vcc, v6, v2
-; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], s6, v3, v[1:2]
-; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v4, v0
-; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], 0, v5, v[1:2]
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v9, v2
+; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], s6, v5, v[1:2]
+; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
+; CHECK-NEXT:    v_subb_u32_e64 v2, s[4:5], v4, v1, vcc
+; CHECK-NEXT:    v_sub_i32_e64 v1, s[4:5], v4, v1
 ; CHECK-NEXT:    v_mov_b32_e32 v6, 0x1000
-; CHECK-NEXT:    s_bfe_i32 s6, 1, 0x10000
-; CHECK-NEXT:    v_subb_u32_e64 v2, s[4:5], v9, v1, vcc
-; CHECK-NEXT:    v_sub_i32_e64 v1, s[4:5], v9, v1
 ; CHECK-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT:    s_bfe_i32 s6, 1, 0x10000
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v6
 ; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[4:5]
-; CHECK-NEXT:    v_mov_b32_e32 v8, s6
+; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[4:5]
+; CHECK-NEXT:    v_mov_b32_e32 v4, s6
 ; CHECK-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v2
 ; CHECK-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, v8, v4, s[4:5]
-; CHECK-NEXT:    v_add_i32_e32 v4, vcc, 1, v5
-; CHECK-NEXT:    v_addc_u32_e32 v8, vcc, 0, v3, vcc
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[4:5]
+; CHECK-NEXT:    v_add_i32_e32 v3, vcc, 1, v8
+; CHECK-NEXT:    v_addc_u32_e32 v4, vcc, 0, v5, vcc
 ; CHECK-NEXT:    s_bfe_i32 s4, 1, 0x10000
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v6
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v6, s4
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, 1, v4
-; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, 0, v8, vcc
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, 1, v3
+; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, 0, v4, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v4, v1, vcc
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v8, v6, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v4, v6, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v7
 ; CHECK-NEXT:    v_xor_b32_e32 v1, v1, v7
 ; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v0, v7
@@ -1438,88 +1437,87 @@ define <2 x i64> @v_sdiv_v2i64_pow2k_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
 ; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; CGP-NEXT:    v_add_i32_e32 v9, vcc, v7, v4
-; CGP-NEXT:    v_addc_u32_e32 v8, vcc, v8, v5, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], s6, v9, 0
-; CGP-NEXT:    v_mov_b32_e32 v4, v6
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v8, v[4:5]
-; CGP-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], -1, v9, v[6:7]
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v1, v4, vcc
-; CGP-NEXT:    v_xor_b32_e32 v10, v0, v4
-; CGP-NEXT:    v_mul_lo_u32 v0, v8, v5
-; CGP-NEXT:    v_mul_lo_u32 v7, v9, v6
-; CGP-NEXT:    v_xor_b32_e32 v11, v1, v4
-; CGP-NEXT:    v_mul_hi_u32 v1, v9, v5
-; CGP-NEXT:    v_mul_hi_u32 v5, v8, v5
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v1, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v9, 0
+; CGP-NEXT:    v_addc_u32_e32 v10, vcc, v8, v5, vcc
+; CGP-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; CGP-NEXT:    v_mov_b32_e32 v4, v7
+; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v10, v[4:5]
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v0, v5
+; CGP-NEXT:    v_addc_u32_e32 v11, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v9, v[7:8]
+; CGP-NEXT:    v_xor_b32_e32 v8, v4, v5
+; CGP-NEXT:    v_mul_lo_u32 v1, v10, v6
+; CGP-NEXT:    v_mul_lo_u32 v4, v9, v0
 ; CGP-NEXT:    v_mul_hi_u32 v7, v9, v6
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_hi_u32 v6, v10, v6
+; CGP-NEXT:    v_xor_b32_e32 v11, v11, v5
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_mul_hi_u32 v6, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v6, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v9, v0
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v8, v1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v5, v11, v0
-; CGP-NEXT:    v_mul_lo_u32 v6, v10, v1
-; CGP-NEXT:    v_mul_hi_u32 v7, v10, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v11, v0
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
+; CGP-NEXT:    v_mul_lo_u32 v7, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_mul_hi_u32 v4, v9, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v1
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_mul_hi_u32 v6, v10, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_mul_hi_u32 v0, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_addc_u32_e32 v0, vcc, v10, v0, vcc
+; CGP-NEXT:    v_mul_lo_u32 v6, v11, v1
+; CGP-NEXT:    v_mul_lo_u32 v7, v8, v0
+; CGP-NEXT:    v_mul_hi_u32 v9, v8, v1
+; CGP-NEXT:    v_mul_hi_u32 v1, v11, v1
+; CGP-NEXT:    v_mul_hi_u32 v10, v11, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v7
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v6
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v9, v11, v0
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT:    v_add_i32_e32 v8, vcc, v0, v5
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v1
-; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v8, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_add_i32_e32 v9, vcc, v7, v5
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v9, v[1:2]
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v10, v0
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], 0, v8, v[6:7]
-; CGP-NEXT:    v_mov_b32_e32 v5, 0x1000
-; CGP-NEXT:    v_mov_b32_e32 v10, s8
+; CGP-NEXT:    v_mul_hi_u32 v7, v8, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v9, v7
+; CGP-NEXT:    v_add_i32_e32 v9, vcc, v1, v6
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v9, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_add_i32_e32 v10, vcc, v10, v6
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v10, v[1:2]
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v8, v0
+; CGP-NEXT:    v_mov_b32_e32 v4, 0x1000
 ; CGP-NEXT:    v_subb_u32_e64 v1, s[4:5], v11, v6, vcc
 ; CGP-NEXT:    v_sub_i32_e64 v6, s[4:5], v11, v6
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v5
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[4:5]
 ; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v1
 ; CGP-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v6, vcc
 ; CGP-NEXT:    v_cvt_f32_u32_e32 v6, 0x1000
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v5
-; CGP-NEXT:    v_cndmask_b32_e64 v10, v10, v7, s[4:5]
+; CGP-NEXT:    v_mov_b32_e32 v8, s8
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; CGP-NEXT:    v_cndmask_b32_e64 v8, v8, v7, s[4:5]
 ; CGP-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v1, vcc
 ; CGP-NEXT:    v_cvt_f32_ubyte0_e32 v1, 0
 ; CGP-NEXT:    v_mac_f32_e32 v6, 0x4f800000, v1
 ; CGP-NEXT:    v_rcp_iflag_f32_e32 v1, v6
-; CGP-NEXT:    v_add_i32_e32 v11, vcc, 1, v8
-; CGP-NEXT:    v_addc_u32_e32 v12, vcc, 0, v9, vcc
+; CGP-NEXT:    v_add_i32_e32 v11, vcc, 1, v9
+; CGP-NEXT:    v_addc_u32_e32 v12, vcc, 0, v10, vcc
 ; CGP-NEXT:    v_mul_f32_e32 v1, 0x5f7ffffc, v1
 ; CGP-NEXT:    v_mul_f32_e32 v6, 0x2f800000, v1
 ; CGP-NEXT:    v_trunc_f32_e32 v6, v6
 ; CGP-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v6
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v13, v1
 ; CGP-NEXT:    s_bfe_i32 s4, 1, 0x10000
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v5
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v4
 ; CGP-NEXT:    v_mov_b32_e32 v15, s4
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v13, 0
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v16, v6
@@ -1557,96 +1555,94 @@ define <2 x i64> @v_sdiv_v2i64_pow2k_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_add_i32_e32 v11, vcc, v13, v0
 ; CGP-NEXT:    v_addc_u32_e32 v13, vcc, v16, v1, vcc
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v11, 0
-; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v10
-; CGP-NEXT:    v_cndmask_b32_e32 v8, v8, v7, vcc
+; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
+; CGP-NEXT:    v_cndmask_b32_e32 v6, v9, v7, vcc
+; CGP-NEXT:    v_xor_b32_e32 v9, v6, v5
 ; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v13, v[1:2]
-; CGP-NEXT:    v_xor_b32_e32 v1, v8, v4
-; CGP-NEXT:    v_ashrrev_i32_e32 v8, 31, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v8, v10, v12, vcc
+; CGP-NEXT:    v_xor_b32_e32 v1, v8, v5
 ; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], -1, v11, v[6:7]
-; CGP-NEXT:    v_cndmask_b32_e32 v9, v9, v12, vcc
+; CGP-NEXT:    v_ashrrev_i32_e32 v8, 31, v3
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
 ; CGP-NEXT:    v_addc_u32_e32 v3, vcc, v3, v8, vcc
-; CGP-NEXT:    v_xor_b32_e32 v10, v2, v8
+; CGP-NEXT:    v_xor_b32_e32 v7, v2, v8
 ; CGP-NEXT:    v_mul_lo_u32 v2, v13, v0
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v6
+; CGP-NEXT:    v_mul_lo_u32 v10, v11, v6
 ; CGP-NEXT:    v_xor_b32_e32 v12, v3, v8
 ; CGP-NEXT:    v_mul_hi_u32 v3, v11, v0
 ; CGP-NEXT:    v_mul_hi_u32 v0, v13, v0
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v10
+; CGP-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CGP-NEXT:    v_mul_lo_u32 v3, v13, v6
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v6
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, v10, v2
+; CGP-NEXT:    v_mul_hi_u32 v10, v11, v6
 ; CGP-NEXT:    v_add_i32_e32 v0, vcc, v3, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v7
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v10
+; CGP-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v10
 ; CGP-NEXT:    v_mul_hi_u32 v6, v13, v6
 ; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v6, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v11, v0
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v11, v0
 ; CGP-NEXT:    v_addc_u32_e32 v2, vcc, v13, v2, vcc
-; CGP-NEXT:    v_mul_lo_u32 v3, v12, v0
-; CGP-NEXT:    v_mul_lo_u32 v6, v10, v2
-; CGP-NEXT:    v_mul_hi_u32 v7, v10, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v12, v0
-; CGP-NEXT:    v_xor_b32_e32 v9, v9, v4
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v7, v12, v2
+; CGP-NEXT:    v_mul_lo_u32 v6, v12, v3
+; CGP-NEXT:    v_mul_lo_u32 v10, v7, v2
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v9, v5
+; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mul_hi_u32 v5, v7, v3
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v10
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v6, v12, v2
+; CGP-NEXT:    v_mul_hi_u32 v3, v12, v3
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
+; CGP-NEXT:    v_mul_hi_u32 v9, v7, v2
 ; CGP-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; CGP-NEXT:    v_mul_hi_u32 v6, v10, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v6
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT:    v_add_i32_e32 v11, vcc, v0, v3
-; CGP-NEXT:    v_mul_hi_u32 v7, v12, v2
-; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v11, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v6, v0
-; CGP-NEXT:    v_add_i32_e32 v13, vcc, v7, v0
-; CGP-NEXT:    v_mov_b32_e32 v0, v3
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v13, v[0:1]
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v1, v4
-; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v9, v4, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], 0, v11, v[6:7]
-; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v10, v2
-; CGP-NEXT:    v_subb_u32_e64 v4, s[4:5], v12, v3, vcc
-; CGP-NEXT:    v_sub_i32_e64 v3, s[4:5], v12, v3
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v9
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
+; CGP-NEXT:    v_add_i32_e32 v9, vcc, v3, v5
+; CGP-NEXT:    v_mul_hi_u32 v10, v12, v2
+; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v9, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; CGP-NEXT:    v_add_i32_e32 v10, vcc, v10, v5
+; CGP-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], s7, v10, v[3:4]
+; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v7, v2
+; CGP-NEXT:    v_subb_u32_e64 v3, s[4:5], v12, v5, vcc
+; CGP-NEXT:    v_sub_i32_e64 v5, s[4:5], v12, v5
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
 ; CGP-NEXT:    s_bfe_i32 s6, 1, 0x10000
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v5
-; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v5
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v4
+; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[4:5]
 ; CGP-NEXT:    v_mov_b32_e32 v7, s6
-; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v4
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; CGP-NEXT:    v_cndmask_b32_e64 v4, v7, v6, s[4:5]
-; CGP-NEXT:    v_add_i32_e32 v6, vcc, 1, v11
-; CGP-NEXT:    v_addc_u32_e32 v7, vcc, 0, v13, vcc
+; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v3
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; CGP-NEXT:    v_cndmask_b32_e64 v3, v7, v6, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, 1, v9
+; CGP-NEXT:    v_addc_u32_e32 v7, vcc, 0, v10, vcc
 ; CGP-NEXT:    s_bfe_i32 s4, 1, 0x10000
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v5
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
-; CGP-NEXT:    v_mov_b32_e32 v5, s4
-; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, 1, v6
+; CGP-NEXT:    v_mov_b32_e32 v4, s4
+; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, 1, v6
 ; CGP-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v7, v5, vcc
-; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v11, v2, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v13, v3, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v6, v4, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v4, v7, v5, vcc
+; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v9, v2, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v3, v10, v4, vcc
 ; CGP-NEXT:    v_xor_b32_e32 v2, v2, v8
 ; CGP-NEXT:    v_xor_b32_e32 v3, v3, v8
 ; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v8
@@ -1700,89 +1696,88 @@ define i64 @v_sdiv_i64_oddk_denom(i64 %num) {
 ; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, v6, v3, vcc
 ; CHECK-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s6, v5, 0
 ; CHECK-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v0, v7
 ; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], s6, v6, v[3:4]
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT:    v_xor_b32_e32 v4, v0, v7
-; CHECK-NEXT:    v_mul_lo_u32 v0, v6, v2
-; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v3
-; CHECK-NEXT:    v_xor_b32_e32 v9, v1, v7
-; CHECK-NEXT:    v_mul_hi_u32 v1, v5, v2
+; CHECK-NEXT:    v_addc_u32_e32 v9, vcc, v1, v7, vcc
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v5, v[3:4]
+; CHECK-NEXT:    v_xor_b32_e32 v3, v8, v7
+; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v2
+; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v0
+; CHECK-NEXT:    v_xor_b32_e32 v4, v9, v7
+; CHECK-NEXT:    v_mul_hi_u32 v9, v5, v2
 ; CHECK-NEXT:    v_mul_hi_u32 v2, v6, v2
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v8, v0
-; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
-; CHECK-NEXT:    v_mul_hi_u32 v3, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v9
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v9, v6, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v9, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
+; CHECK-NEXT:    v_mul_hi_u32 v0, v6, v0
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v2, v9, v0
-; CHECK-NEXT:    v_mul_lo_u32 v3, v4, v1
-; CHECK-NEXT:    v_mul_hi_u32 v5, v4, v0
-; CHECK-NEXT:    v_mul_hi_u32 v0, v9, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v8, v2
+; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
+; CHECK-NEXT:    v_addc_u32_e32 v0, vcc, v6, v0, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v2, v4, v1
+; CHECK-NEXT:    v_mul_lo_u32 v5, v3, v0
+; CHECK-NEXT:    v_mul_hi_u32 v8, v3, v1
+; CHECK-NEXT:    v_mul_hi_u32 v1, v4, v1
 ; CHECK-NEXT:    s_mov_b32 s6, 0x12d8fb
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v5, v9, v1
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_mul_hi_u32 v3, v4, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
+; CHECK-NEXT:    v_mul_lo_u32 v8, v4, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_mul_hi_u32 v5, v3, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
 ; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v0, v2
-; CHECK-NEXT:    v_mul_hi_u32 v6, v9, v1
-; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v5, 0
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v1, v2
+; CHECK-NEXT:    v_mul_hi_u32 v9, v4, v0
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v8, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_add_i32_e32 v3, vcc, v6, v2
-; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], s6, v3, v[1:2]
-; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v4, v0
-; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], 0, v5, v[1:2]
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v9, v2
+; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], s6, v5, v[1:2]
+; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
+; CHECK-NEXT:    v_subb_u32_e64 v2, s[4:5], v4, v1, vcc
+; CHECK-NEXT:    v_sub_i32_e64 v1, s[4:5], v4, v1
 ; CHECK-NEXT:    v_mov_b32_e32 v6, 0x12d8fb
-; CHECK-NEXT:    s_bfe_i32 s6, 1, 0x10000
-; CHECK-NEXT:    v_subb_u32_e64 v2, s[4:5], v9, v1, vcc
-; CHECK-NEXT:    v_sub_i32_e64 v1, s[4:5], v9, v1
 ; CHECK-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
+; CHECK-NEXT:    s_bfe_i32 s6, 1, 0x10000
 ; CHECK-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v6
 ; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v0, v6
-; CHECK-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[4:5]
-; CHECK-NEXT:    v_mov_b32_e32 v8, s6
+; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, -1, s[4:5]
+; CHECK-NEXT:    v_mov_b32_e32 v4, s6
 ; CHECK-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v2
 ; CHECK-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, v8, v4, s[4:5]
-; CHECK-NEXT:    v_add_i32_e32 v4, vcc, 1, v5
-; CHECK-NEXT:    v_addc_u32_e32 v8, vcc, 0, v3, vcc
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, v4, v3, s[4:5]
+; CHECK-NEXT:    v_add_i32_e32 v3, vcc, 1, v8
+; CHECK-NEXT:    v_addc_u32_e32 v4, vcc, 0, v5, vcc
 ; CHECK-NEXT:    s_bfe_i32 s4, 1, 0x10000
 ; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v6
 ; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
 ; CHECK-NEXT:    v_mov_b32_e32 v6, s4
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v6, v0, vcc
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, 1, v4
-; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, 0, v8, vcc
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, 1, v3
+; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, 0, v4, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v4, v1, vcc
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v8, v6, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v3, v1, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v4, v6, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v5, v0, vcc
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v5, v1, vcc
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v7
 ; CHECK-NEXT:    v_xor_b32_e32 v1, v1, v7
 ; CHECK-NEXT:    v_sub_i32_e32 v0, vcc, v0, v7
@@ -2102,88 +2097,87 @@ define <2 x i64> @v_sdiv_v2i64_oddk_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
 ; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; CGP-NEXT:    v_add_i32_e32 v9, vcc, v7, v4
-; CGP-NEXT:    v_addc_u32_e32 v8, vcc, v8, v5, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], s6, v9, 0
-; CGP-NEXT:    v_mov_b32_e32 v4, v6
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v8, v[4:5]
-; CGP-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], -1, v9, v[6:7]
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v1, v4, vcc
-; CGP-NEXT:    v_xor_b32_e32 v10, v0, v4
-; CGP-NEXT:    v_mul_lo_u32 v0, v8, v5
-; CGP-NEXT:    v_mul_lo_u32 v7, v9, v6
-; CGP-NEXT:    v_xor_b32_e32 v11, v1, v4
-; CGP-NEXT:    v_mul_hi_u32 v1, v9, v5
-; CGP-NEXT:    v_mul_hi_u32 v5, v8, v5
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v1, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v9, 0
+; CGP-NEXT:    v_addc_u32_e32 v10, vcc, v8, v5, vcc
+; CGP-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; CGP-NEXT:    v_mov_b32_e32 v4, v7
+; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v10, v[4:5]
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v0, v5
+; CGP-NEXT:    v_addc_u32_e32 v11, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v9, v[7:8]
+; CGP-NEXT:    v_xor_b32_e32 v8, v4, v5
+; CGP-NEXT:    v_mul_lo_u32 v1, v10, v6
+; CGP-NEXT:    v_mul_lo_u32 v4, v9, v0
 ; CGP-NEXT:    v_mul_hi_u32 v7, v9, v6
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_hi_u32 v6, v10, v6
+; CGP-NEXT:    v_xor_b32_e32 v11, v11, v5
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_mul_hi_u32 v6, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v6, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v9, v0
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v8, v1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v5, v11, v0
-; CGP-NEXT:    v_mul_lo_u32 v6, v10, v1
-; CGP-NEXT:    v_mul_hi_u32 v7, v10, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v11, v0
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
+; CGP-NEXT:    v_mul_lo_u32 v7, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_mul_hi_u32 v4, v9, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v1
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_mul_hi_u32 v6, v10, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_mul_hi_u32 v0, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_addc_u32_e32 v0, vcc, v10, v0, vcc
+; CGP-NEXT:    v_mul_lo_u32 v6, v11, v1
+; CGP-NEXT:    v_mul_lo_u32 v7, v8, v0
+; CGP-NEXT:    v_mul_hi_u32 v9, v8, v1
+; CGP-NEXT:    v_mul_hi_u32 v1, v11, v1
+; CGP-NEXT:    v_mul_hi_u32 v10, v11, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v7
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v6
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v9, v11, v0
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT:    v_add_i32_e32 v8, vcc, v0, v5
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v1
-; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v8, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_add_i32_e32 v9, vcc, v7, v5
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v9, v[1:2]
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v10, v0
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], 0, v8, v[6:7]
-; CGP-NEXT:    v_mov_b32_e32 v5, 0x12d8fb
-; CGP-NEXT:    v_mov_b32_e32 v10, s8
+; CGP-NEXT:    v_mul_hi_u32 v7, v8, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v9, v7
+; CGP-NEXT:    v_add_i32_e32 v9, vcc, v1, v6
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v9, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_add_i32_e32 v10, vcc, v10, v6
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v10, v[1:2]
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v8, v0
+; CGP-NEXT:    v_mov_b32_e32 v4, 0x12d8fb
 ; CGP-NEXT:    v_subb_u32_e64 v1, s[4:5], v11, v6, vcc
 ; CGP-NEXT:    v_sub_i32_e64 v6, s[4:5], v11, v6
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v5
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v0, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[4:5]
 ; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v1
 ; CGP-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v6, vcc
 ; CGP-NEXT:    v_cvt_f32_u32_e32 v6, 0x12d8fb
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v5
-; CGP-NEXT:    v_cndmask_b32_e64 v10, v10, v7, s[4:5]
+; CGP-NEXT:    v_mov_b32_e32 v8, s8
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
+; CGP-NEXT:    v_cndmask_b32_e64 v8, v8, v7, s[4:5]
 ; CGP-NEXT:    v_subbrev_u32_e32 v7, vcc, 0, v1, vcc
 ; CGP-NEXT:    v_cvt_f32_ubyte0_e32 v1, 0
 ; CGP-NEXT:    v_mac_f32_e32 v6, 0x4f800000, v1
 ; CGP-NEXT:    v_rcp_iflag_f32_e32 v1, v6
-; CGP-NEXT:    v_add_i32_e32 v11, vcc, 1, v8
-; CGP-NEXT:    v_addc_u32_e32 v12, vcc, 0, v9, vcc
+; CGP-NEXT:    v_add_i32_e32 v11, vcc, 1, v9
+; CGP-NEXT:    v_addc_u32_e32 v12, vcc, 0, v10, vcc
 ; CGP-NEXT:    v_mul_f32_e32 v1, 0x5f7ffffc, v1
 ; CGP-NEXT:    v_mul_f32_e32 v6, 0x2f800000, v1
 ; CGP-NEXT:    v_trunc_f32_e32 v6, v6
 ; CGP-NEXT:    v_mac_f32_e32 v1, 0xcf800000, v6
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v13, v1
 ; CGP-NEXT:    s_bfe_i32 s4, 1, 0x10000
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v5
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v4
 ; CGP-NEXT:    v_mov_b32_e32 v15, s4
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v13, 0
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v16, v6
@@ -2221,96 +2215,94 @@ define <2 x i64> @v_sdiv_v2i64_oddk_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_add_i32_e32 v11, vcc, v13, v0
 ; CGP-NEXT:    v_addc_u32_e32 v13, vcc, v16, v1, vcc
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v11, 0
-; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v10
-; CGP-NEXT:    v_cndmask_b32_e32 v8, v8, v7, vcc
+; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v8
+; CGP-NEXT:    v_cndmask_b32_e32 v6, v9, v7, vcc
+; CGP-NEXT:    v_xor_b32_e32 v9, v6, v5
 ; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v13, v[1:2]
-; CGP-NEXT:    v_xor_b32_e32 v1, v8, v4
-; CGP-NEXT:    v_ashrrev_i32_e32 v8, 31, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v8, v10, v12, vcc
+; CGP-NEXT:    v_xor_b32_e32 v1, v8, v5
 ; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], -1, v11, v[6:7]
-; CGP-NEXT:    v_cndmask_b32_e32 v9, v9, v12, vcc
+; CGP-NEXT:    v_ashrrev_i32_e32 v8, 31, v3
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
 ; CGP-NEXT:    v_addc_u32_e32 v3, vcc, v3, v8, vcc
-; CGP-NEXT:    v_xor_b32_e32 v10, v2, v8
+; CGP-NEXT:    v_xor_b32_e32 v7, v2, v8
 ; CGP-NEXT:    v_mul_lo_u32 v2, v13, v0
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v6
+; CGP-NEXT:    v_mul_lo_u32 v10, v11, v6
 ; CGP-NEXT:    v_xor_b32_e32 v12, v3, v8
 ; CGP-NEXT:    v_mul_hi_u32 v3, v11, v0
 ; CGP-NEXT:    v_mul_hi_u32 v0, v13, v0
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v10
+; CGP-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CGP-NEXT:    v_mul_lo_u32 v3, v13, v6
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v6
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, v10, v2
+; CGP-NEXT:    v_mul_hi_u32 v10, v11, v6
 ; CGP-NEXT:    v_add_i32_e32 v0, vcc, v3, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v7
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v10
+; CGP-NEXT:    v_cndmask_b32_e64 v10, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v10
 ; CGP-NEXT:    v_mul_hi_u32 v6, v13, v6
 ; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v6, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v11, v0
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v11, v0
 ; CGP-NEXT:    v_addc_u32_e32 v2, vcc, v13, v2, vcc
-; CGP-NEXT:    v_mul_lo_u32 v3, v12, v0
-; CGP-NEXT:    v_mul_lo_u32 v6, v10, v2
-; CGP-NEXT:    v_mul_hi_u32 v7, v10, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v12, v0
-; CGP-NEXT:    v_xor_b32_e32 v9, v9, v4
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v6
-; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v7, v12, v2
+; CGP-NEXT:    v_mul_lo_u32 v6, v12, v3
+; CGP-NEXT:    v_mul_lo_u32 v10, v7, v2
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v9, v5
+; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mul_hi_u32 v5, v7, v3
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v10
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v6, v12, v2
+; CGP-NEXT:    v_mul_hi_u32 v3, v12, v3
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v9, v5
+; CGP-NEXT:    v_mul_hi_u32 v9, v7, v2
 ; CGP-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
-; CGP-NEXT:    v_mul_hi_u32 v6, v10, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v6
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT:    v_add_i32_e32 v11, vcc, v0, v3
-; CGP-NEXT:    v_mul_hi_u32 v7, v12, v2
-; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v11, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v6, v0
-; CGP-NEXT:    v_add_i32_e32 v13, vcc, v7, v0
-; CGP-NEXT:    v_mov_b32_e32 v0, v3
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v13, v[0:1]
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v1, v4
-; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v9, v4, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], 0, v11, v[6:7]
-; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v10, v2
-; CGP-NEXT:    v_subb_u32_e64 v4, s[4:5], v12, v3, vcc
-; CGP-NEXT:    v_sub_i32_e64 v3, s[4:5], v12, v3
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v9
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
+; CGP-NEXT:    v_add_i32_e32 v9, vcc, v3, v5
+; CGP-NEXT:    v_mul_hi_u32 v10, v12, v2
+; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v9, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
+; CGP-NEXT:    v_add_i32_e32 v10, vcc, v10, v5
+; CGP-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], s7, v10, v[3:4]
+; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v7, v2
+; CGP-NEXT:    v_subb_u32_e64 v3, s[4:5], v12, v5, vcc
+; CGP-NEXT:    v_sub_i32_e64 v5, s[4:5], v12, v5
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
 ; CGP-NEXT:    s_bfe_i32 s6, 1, 0x10000
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v5
-; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v5
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v4
+; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[4:5]
 ; CGP-NEXT:    v_mov_b32_e32 v7, s6
-; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v4
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; CGP-NEXT:    v_cndmask_b32_e64 v4, v7, v6, s[4:5]
-; CGP-NEXT:    v_add_i32_e32 v6, vcc, 1, v11
-; CGP-NEXT:    v_addc_u32_e32 v7, vcc, 0, v13, vcc
+; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v3
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; CGP-NEXT:    v_cndmask_b32_e64 v3, v7, v6, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, 1, v9
+; CGP-NEXT:    v_addc_u32_e32 v7, vcc, 0, v10, vcc
 ; CGP-NEXT:    s_bfe_i32 s4, 1, 0x10000
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v5
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v2, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, -1, vcc
-; CGP-NEXT:    v_mov_b32_e32 v5, s4
-; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v5, v2, vcc
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, 1, v6
+; CGP-NEXT:    v_mov_b32_e32 v4, s4
+; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v4, v2, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, 1, v6
 ; CGP-NEXT:    v_addc_u32_e32 v5, vcc, 0, v7, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v7, v5, vcc
-; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v11, v2, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v13, v3, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v6, v4, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v4, v7, v5, vcc
+; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v9, v2, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v3, v10, v4, vcc
 ; CGP-NEXT:    v_xor_b32_e32 v2, v2, v8
 ; CGP-NEXT:    v_xor_b32_e32 v3, v3, v8
 ; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v8

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
index 30ec4f1540730..fe25c445218db 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
@@ -1017,84 +1017,83 @@ define i64 @v_srem_i64_pow2k_denom(i64 %num) {
 ; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, v6, v3, vcc
 ; CHECK-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s6, v5, 0
 ; CHECK-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v0, v7
 ; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], s6, v6, v[3:4]
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT:    v_xor_b32_e32 v4, v0, v7
-; CHECK-NEXT:    v_mul_lo_u32 v0, v6, v2
-; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v3
-; CHECK-NEXT:    v_xor_b32_e32 v9, v1, v7
-; CHECK-NEXT:    v_mul_hi_u32 v1, v5, v2
+; CHECK-NEXT:    v_addc_u32_e32 v9, vcc, v1, v7, vcc
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v5, v[3:4]
+; CHECK-NEXT:    v_xor_b32_e32 v3, v8, v7
+; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v2
+; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v0
+; CHECK-NEXT:    v_xor_b32_e32 v4, v9, v7
+; CHECK-NEXT:    v_mul_hi_u32 v9, v5, v2
 ; CHECK-NEXT:    v_mul_hi_u32 v2, v6, v2
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v8, v0
-; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
-; CHECK-NEXT:    v_mul_hi_u32 v3, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v9
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v9, v6, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v9, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
+; CHECK-NEXT:    v_mul_hi_u32 v0, v6, v0
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v2, v9, v0
-; CHECK-NEXT:    v_mul_lo_u32 v3, v4, v1
-; CHECK-NEXT:    v_mul_hi_u32 v5, v4, v0
-; CHECK-NEXT:    v_mul_hi_u32 v0, v9, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v8, v2
+; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
+; CHECK-NEXT:    v_addc_u32_e32 v0, vcc, v6, v0, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v2, v4, v1
+; CHECK-NEXT:    v_mul_lo_u32 v5, v3, v0
+; CHECK-NEXT:    v_mul_hi_u32 v8, v3, v1
+; CHECK-NEXT:    v_mul_hi_u32 v1, v4, v1
 ; CHECK-NEXT:    s_movk_i32 s6, 0x1000
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v5, v9, v1
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_mul_hi_u32 v3, v4, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
+; CHECK-NEXT:    v_mul_lo_u32 v8, v4, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_mul_hi_u32 v5, v3, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
 ; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v0, v2
-; CHECK-NEXT:    v_mul_hi_u32 v6, v9, v1
-; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v5, 0
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; CHECK-NEXT:    v_mul_hi_u32 v8, v4, v0
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v6, v2
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v8, v2
 ; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], s6, v2, v[1:2]
-; CHECK-NEXT:    v_mov_b32_e32 v3, 0x1000
+; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v3, v0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0x1000
+; CHECK-NEXT:    v_subb_u32_e64 v2, vcc, v4, v1, s[4:5]
+; CHECK-NEXT:    v_sub_i32_e32 v1, vcc, v4, v1
 ; CHECK-NEXT:    s_bfe_i32 s6, 1, 0x10000
-; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], 0, v5, v[1:2]
-; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v4, v0
-; CHECK-NEXT:    v_subb_u32_e64 v2, vcc, v9, v1, s[4:5]
-; CHECK-NEXT:    v_sub_i32_e32 v1, vcc, v9, v1
-; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; CHECK-NEXT:    v_mov_b32_e32 v5, s6
+; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v6
+; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v4, s6
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; CHECK-NEXT:    v_cndmask_b32_e32 v4, v5, v4, vcc
-; CHECK-NEXT:    v_sub_i32_e32 v6, vcc, v0, v3
+; CHECK-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
+; CHECK-NEXT:    v_sub_i32_e32 v5, vcc, v0, v6
 ; CHECK-NEXT:    v_subbrev_u32_e64 v1, s[4:5], 0, v1, s[4:5]
 ; CHECK-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v6, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v6
+; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; CHECK-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; CHECK-NEXT:    v_subrev_i32_e32 v5, vcc, 0x1000, v6
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
+; CHECK-NEXT:    v_subrev_i32_e32 v6, vcc, 0x1000, v5
 ; CHECK-NEXT:    v_subbrev_u32_e32 v8, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; CHECK-NEXT:    v_cndmask_b32_e32 v3, v6, v5, vcc
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; CHECK-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v7
 ; CHECK-NEXT:    v_xor_b32_e32 v1, v1, v7
@@ -1413,92 +1412,91 @@ define <2 x i64> @v_srem_v2i64_pow2k_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
 ; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; CGP-NEXT:    v_add_i32_e32 v9, vcc, v7, v4
-; CGP-NEXT:    v_addc_u32_e32 v8, vcc, v8, v5, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], s6, v9, 0
-; CGP-NEXT:    v_mov_b32_e32 v4, v6
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v8, v[4:5]
-; CGP-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], -1, v9, v[6:7]
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v1, v4, vcc
-; CGP-NEXT:    v_xor_b32_e32 v10, v0, v4
-; CGP-NEXT:    v_mul_lo_u32 v0, v8, v5
-; CGP-NEXT:    v_mul_lo_u32 v7, v9, v6
-; CGP-NEXT:    v_xor_b32_e32 v11, v1, v4
-; CGP-NEXT:    v_mul_hi_u32 v1, v9, v5
-; CGP-NEXT:    v_mul_hi_u32 v5, v8, v5
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v1, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v9, 0
+; CGP-NEXT:    v_addc_u32_e32 v10, vcc, v8, v5, vcc
+; CGP-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; CGP-NEXT:    v_mov_b32_e32 v4, v7
+; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v10, v[4:5]
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v0, v5
+; CGP-NEXT:    v_addc_u32_e32 v11, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v9, v[7:8]
+; CGP-NEXT:    v_xor_b32_e32 v8, v4, v5
+; CGP-NEXT:    v_mul_lo_u32 v1, v10, v6
+; CGP-NEXT:    v_mul_lo_u32 v4, v9, v0
 ; CGP-NEXT:    v_mul_hi_u32 v7, v9, v6
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_hi_u32 v6, v10, v6
+; CGP-NEXT:    v_xor_b32_e32 v11, v11, v5
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_mul_hi_u32 v6, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v6, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v9, v0
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v8, v1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v5, v11, v0
-; CGP-NEXT:    v_mul_lo_u32 v6, v10, v1
-; CGP-NEXT:    v_mul_hi_u32 v7, v10, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v11, v0
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
+; CGP-NEXT:    v_mul_lo_u32 v7, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_mul_hi_u32 v4, v9, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v1
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_mul_hi_u32 v6, v10, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_mul_hi_u32 v0, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_addc_u32_e32 v0, vcc, v10, v0, vcc
+; CGP-NEXT:    v_mul_lo_u32 v6, v11, v1
+; CGP-NEXT:    v_mul_lo_u32 v7, v8, v0
+; CGP-NEXT:    v_mul_hi_u32 v9, v8, v1
+; CGP-NEXT:    v_mul_hi_u32 v1, v11, v1
+; CGP-NEXT:    v_mov_b32_e32 v4, 0x1000
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v7
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v6
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v9, v11, v0
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT:    v_add_i32_e32 v8, vcc, v0, v5
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v1
-; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v8, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v5, v[1:2]
-; CGP-NEXT:    v_sub_i32_e32 v9, vcc, v10, v0
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], 0, v8, v[6:7]
-; CGP-NEXT:    v_mov_b32_e32 v5, 0x1000
-; CGP-NEXT:    v_cvt_f32_ubyte0_e32 v7, 0
+; CGP-NEXT:    v_mul_hi_u32 v7, v8, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v9, v7
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v6
+; CGP-NEXT:    v_mul_hi_u32 v9, v11, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v1, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v6, v[1:2]
+; CGP-NEXT:    v_sub_i32_e32 v9, vcc, v8, v0
 ; CGP-NEXT:    v_subb_u32_e64 v10, s[4:5], v11, v6, vcc
 ; CGP-NEXT:    v_sub_i32_e64 v0, s[4:5], v11, v6
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v9, v5
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v9, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[4:5]
 ; CGP-NEXT:    v_mov_b32_e32 v6, s8
 ; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v10
 ; CGP-NEXT:    v_cndmask_b32_e64 v11, v6, v1, s[4:5]
 ; CGP-NEXT:    v_cvt_f32_u32_e32 v1, 0x1000
+; CGP-NEXT:    v_cvt_f32_ubyte0_e32 v7, 0
 ; CGP-NEXT:    v_subbrev_u32_e32 v0, vcc, 0, v0, vcc
-; CGP-NEXT:    v_sub_i32_e32 v12, vcc, v9, v5
 ; CGP-NEXT:    v_mac_f32_e32 v1, 0x4f800000, v7
 ; CGP-NEXT:    v_rcp_iflag_f32_e32 v1, v1
+; CGP-NEXT:    v_sub_i32_e32 v12, vcc, v9, v4
 ; CGP-NEXT:    v_subbrev_u32_e32 v13, vcc, 0, v0, vcc
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v12, v5
 ; CGP-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v1
 ; CGP-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; CGP-NEXT:    v_trunc_f32_e32 v7, v1
 ; CGP-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v7
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v14, v0
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v15, v7
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v12, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v13
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v14, 0
+; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v13
 ; CGP-NEXT:    v_cndmask_b32_e32 v16, v6, v8, vcc
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v15, v[1:2]
-; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v12, v5
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v12, v4
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], -1, v14, v[7:8]
 ; CGP-NEXT:    v_subbrev_u32_e32 v17, vcc, 0, v13, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v16
@@ -1529,12 +1527,13 @@ define <2 x i64> @v_srem_v2i64_pow2k_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_addc_u32_e32 v14, vcc, v15, v1, vcc
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v12, 0
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v11
-; CGP-NEXT:    v_cndmask_b32_e32 v9, v9, v8, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v7, v9, v8, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v9, v10, v13, vcc
+; CGP-NEXT:    v_xor_b32_e32 v10, v7, v5
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v14, v[1:2]
-; CGP-NEXT:    v_xor_b32_e32 v1, v9, v4
+; CGP-NEXT:    v_xor_b32_e32 v1, v9, v5
 ; CGP-NEXT:    v_ashrrev_i32_e32 v9, 31, v3
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], -1, v12, v[7:8]
-; CGP-NEXT:    v_cndmask_b32_e32 v10, v10, v13, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v9
 ; CGP-NEXT:    v_addc_u32_e32 v3, vcc, v3, v9, vcc
 ; CGP-NEXT:    v_xor_b32_e32 v11, v2, v9
@@ -1560,58 +1559,55 @@ define <2 x i64> @v_srem_v2i64_pow2k_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v12, v0
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v12, v0
 ; CGP-NEXT:    v_addc_u32_e32 v2, vcc, v14, v2, vcc
-; CGP-NEXT:    v_mul_lo_u32 v3, v13, v0
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v2
-; CGP-NEXT:    v_mul_hi_u32 v8, v11, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v13, v0
-; CGP-NEXT:    v_xor_b32_e32 v10, v10, v4
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v7
+; CGP-NEXT:    v_mul_lo_u32 v7, v13, v3
+; CGP-NEXT:    v_mul_lo_u32 v8, v11, v2
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v10, v5
+; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mul_hi_u32 v5, v11, v3
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v7, v8
+; CGP-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v7, v13, v2
+; CGP-NEXT:    v_mul_hi_u32 v3, v13, v3
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CGP-NEXT:    v_mul_hi_u32 v8, v11, v2
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v7, v3
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v8
-; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v8, v13, v2
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v7, v3
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v8, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v7, vcc, v8, v7
-; CGP-NEXT:    v_add_i32_e32 v12, vcc, v0, v3
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v7, v8
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; CGP-NEXT:    v_mul_hi_u32 v8, v13, v2
-; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v12, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
-; CGP-NEXT:    v_add_i32_e32 v7, vcc, v8, v0
-; CGP-NEXT:    v_mov_b32_e32 v0, v3
-; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s7, v7, v[0:1]
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v1, v4
-; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v10, v4, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], 0, v12, v[7:8]
+; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v3, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s7, v5, v[3:4]
 ; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v11, v2
-; CGP-NEXT:    v_subb_u32_e64 v4, s[4:5], v13, v3, vcc
-; CGP-NEXT:    v_sub_i32_e64 v3, s[4:5], v13, v3
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; CGP-NEXT:    v_sub_i32_e32 v8, vcc, v2, v5
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v5
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v5
+; CGP-NEXT:    v_sub_i32_e64 v5, s[4:5], v13, v7
+; CGP-NEXT:    v_subb_u32_e64 v3, s[4:5], v13, v7, vcc
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; CGP-NEXT:    v_sub_i32_e32 v8, vcc, v2, v4
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v4
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[4:5]
-; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v4
+; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v3
 ; CGP-NEXT:    v_cndmask_b32_e64 v10, 0, -1, vcc
-; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, v6, v7, s[4:5]
 ; CGP-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
-; CGP-NEXT:    v_sub_i32_e32 v5, vcc, v8, v5
-; CGP-NEXT:    v_subbrev_u32_e32 v10, vcc, 0, v3, vcc
+; CGP-NEXT:    v_sub_i32_e32 v4, vcc, v8, v4
+; CGP-NEXT:    v_subbrev_u32_e32 v10, vcc, 0, v5, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
-; CGP-NEXT:    v_cndmask_b32_e32 v5, v8, v5, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v3, v10, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v5, v5, v10, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
 ; CGP-NEXT:    v_xor_b32_e32 v2, v2, v9
 ; CGP-NEXT:    v_xor_b32_e32 v3, v3, v9
 ; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v9
@@ -1665,84 +1661,83 @@ define i64 @v_srem_i64_oddk_denom(i64 %num) {
 ; CHECK-NEXT:    v_addc_u32_e32 v6, vcc, v6, v3, vcc
 ; CHECK-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s6, v5, 0
 ; CHECK-NEXT:    v_ashrrev_i32_e32 v7, 31, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v0, v7
 ; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], s6, v6, v[3:4]
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v1, v7, vcc
-; CHECK-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], -1, v5, v[3:4]
-; CHECK-NEXT:    v_xor_b32_e32 v4, v0, v7
-; CHECK-NEXT:    v_mul_lo_u32 v0, v6, v2
-; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v3
-; CHECK-NEXT:    v_xor_b32_e32 v9, v1, v7
-; CHECK-NEXT:    v_mul_hi_u32 v1, v5, v2
+; CHECK-NEXT:    v_addc_u32_e32 v9, vcc, v1, v7, vcc
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v5, v[3:4]
+; CHECK-NEXT:    v_xor_b32_e32 v3, v8, v7
+; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v2
+; CHECK-NEXT:    v_mul_lo_u32 v8, v5, v0
+; CHECK-NEXT:    v_xor_b32_e32 v4, v9, v7
+; CHECK-NEXT:    v_mul_hi_u32 v9, v5, v2
 ; CHECK-NEXT:    v_mul_hi_u32 v2, v6, v2
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v8
-; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v1, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v8, v0
-; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
-; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
-; CHECK-NEXT:    v_mul_hi_u32 v3, v6, v3
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v9
 ; CHECK-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v9, v6, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_mul_hi_u32 v8, v5, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v9, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v8, vcc, v9, v8
+; CHECK-NEXT:    v_mul_hi_u32 v0, v6, v0
 ; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
-; CHECK-NEXT:    v_addc_u32_e32 v1, vcc, v6, v1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v2, v9, v0
-; CHECK-NEXT:    v_mul_lo_u32 v3, v4, v1
-; CHECK-NEXT:    v_mul_hi_u32 v5, v4, v0
-; CHECK-NEXT:    v_mul_hi_u32 v0, v9, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v8, v2
+; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
+; CHECK-NEXT:    v_addc_u32_e32 v0, vcc, v6, v0, vcc
+; CHECK-NEXT:    v_mul_lo_u32 v2, v4, v1
+; CHECK-NEXT:    v_mul_lo_u32 v5, v3, v0
+; CHECK-NEXT:    v_mul_hi_u32 v8, v3, v1
+; CHECK-NEXT:    v_mul_hi_u32 v1, v4, v1
 ; CHECK-NEXT:    s_mov_b32 s6, 0x12d8fb
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
 ; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v5
+; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v8
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_mul_lo_u32 v5, v9, v1
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_mul_hi_u32 v3, v4, v1
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v5, v0
+; CHECK-NEXT:    v_mul_lo_u32 v8, v4, v0
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_mul_hi_u32 v5, v3, v0
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v8, v1
+; CHECK-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
 ; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
-; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v0, v2
-; CHECK-NEXT:    v_mul_hi_u32 v6, v9, v1
-; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v5, 0
+; CHECK-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CHECK-NEXT:    v_add_i32_e32 v1, vcc, v1, v2
+; CHECK-NEXT:    v_mul_hi_u32 v8, v4, v0
+; CHECK-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v1, 0
 ; CHECK-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v6, v2
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v5, v2
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v8, v2
 ; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], s6, v2, v[1:2]
-; CHECK-NEXT:    v_mov_b32_e32 v3, 0x12d8fb
+; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v3, v0
+; CHECK-NEXT:    v_mov_b32_e32 v6, 0x12d8fb
+; CHECK-NEXT:    v_subb_u32_e64 v2, vcc, v4, v1, s[4:5]
+; CHECK-NEXT:    v_sub_i32_e32 v1, vcc, v4, v1
 ; CHECK-NEXT:    s_bfe_i32 s6, 1, 0x10000
-; CHECK-NEXT:    v_mad_u64_u32 v[1:2], s[4:5], 0, v5, v[1:2]
-; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v4, v0
-; CHECK-NEXT:    v_subb_u32_e64 v2, vcc, v9, v1, s[4:5]
-; CHECK-NEXT:    v_sub_i32_e32 v1, vcc, v9, v1
-; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
-; CHECK-NEXT:    v_mov_b32_e32 v5, s6
+; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v0, v6
+; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; CHECK-NEXT:    v_mov_b32_e32 v4, s6
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; CHECK-NEXT:    v_cndmask_b32_e32 v4, v5, v4, vcc
-; CHECK-NEXT:    v_sub_i32_e32 v6, vcc, v0, v3
+; CHECK-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
+; CHECK-NEXT:    v_sub_i32_e32 v5, vcc, v0, v6
 ; CHECK-NEXT:    v_subbrev_u32_e64 v1, s[4:5], 0, v1, s[4:5]
 ; CHECK-NEXT:    v_subbrev_u32_e32 v1, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v6, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
+; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v6
+; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v1
-; CHECK-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
-; CHECK-NEXT:    v_subrev_i32_e32 v5, vcc, 0x12d8fb, v6
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, v4, v6, vcc
+; CHECK-NEXT:    v_subrev_i32_e32 v6, vcc, 0x12d8fb, v5
 ; CHECK-NEXT:    v_subbrev_u32_e32 v8, vcc, 0, v1, vcc
-; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
-; CHECK-NEXT:    v_cndmask_b32_e32 v3, v6, v5, vcc
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v4
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v4, v5, v6, vcc
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v3
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
 ; CHECK-NEXT:    v_cndmask_b32_e32 v1, v2, v1, vcc
 ; CHECK-NEXT:    v_xor_b32_e32 v0, v0, v7
 ; CHECK-NEXT:    v_xor_b32_e32 v1, v1, v7
@@ -2061,92 +2056,91 @@ define <2 x i64> @v_srem_v2i64_oddk_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
 ; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
 ; CGP-NEXT:    v_add_i32_e32 v9, vcc, v7, v4
-; CGP-NEXT:    v_addc_u32_e32 v8, vcc, v8, v5, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[5:6], s[4:5], s6, v9, 0
-; CGP-NEXT:    v_mov_b32_e32 v4, v6
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v8, v[4:5]
-; CGP-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], -1, v9, v[6:7]
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v1, v4, vcc
-; CGP-NEXT:    v_xor_b32_e32 v10, v0, v4
-; CGP-NEXT:    v_mul_lo_u32 v0, v8, v5
-; CGP-NEXT:    v_mul_lo_u32 v7, v9, v6
-; CGP-NEXT:    v_xor_b32_e32 v11, v1, v4
-; CGP-NEXT:    v_mul_hi_u32 v1, v9, v5
-; CGP-NEXT:    v_mul_hi_u32 v5, v8, v5
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v1, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s6, v9, 0
+; CGP-NEXT:    v_addc_u32_e32 v10, vcc, v8, v5, vcc
+; CGP-NEXT:    v_ashrrev_i32_e32 v5, 31, v1
+; CGP-NEXT:    v_mov_b32_e32 v4, v7
+; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v10, v[4:5]
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v0, v5
+; CGP-NEXT:    v_addc_u32_e32 v11, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], -1, v9, v[7:8]
+; CGP-NEXT:    v_xor_b32_e32 v8, v4, v5
+; CGP-NEXT:    v_mul_lo_u32 v1, v10, v6
+; CGP-NEXT:    v_mul_lo_u32 v4, v9, v0
 ; CGP-NEXT:    v_mul_hi_u32 v7, v9, v6
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v5
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_hi_u32 v6, v10, v6
+; CGP-NEXT:    v_xor_b32_e32 v11, v11, v5
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v4
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_mul_hi_u32 v6, v8, v6
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v1, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v6, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v9, v0
-; CGP-NEXT:    v_addc_u32_e32 v1, vcc, v8, v1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v5, v11, v0
-; CGP-NEXT:    v_mul_lo_u32 v6, v10, v1
-; CGP-NEXT:    v_mul_hi_u32 v7, v10, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v11, v0
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v6
+; CGP-NEXT:    v_mul_lo_u32 v7, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_mul_hi_u32 v4, v9, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v1
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_mul_hi_u32 v6, v10, v1
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_mul_hi_u32 v0, v10, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v4, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v4, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v4, vcc, v6, v4
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_addc_u32_e32 v0, vcc, v10, v0, vcc
+; CGP-NEXT:    v_mul_lo_u32 v6, v11, v1
+; CGP-NEXT:    v_mul_lo_u32 v7, v8, v0
+; CGP-NEXT:    v_mul_hi_u32 v9, v8, v1
+; CGP-NEXT:    v_mul_hi_u32 v1, v11, v1
+; CGP-NEXT:    v_mov_b32_e32 v4, 0x12d8fb
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v7
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v6
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v6, v9
 ; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v9, v11, v0
 ; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; CGP-NEXT:    v_add_i32_e32 v8, vcc, v0, v5
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v1
-; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v8, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; CGP-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v5, v[1:2]
-; CGP-NEXT:    v_sub_i32_e32 v9, vcc, v10, v0
-; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], 0, v8, v[6:7]
-; CGP-NEXT:    v_mov_b32_e32 v5, 0x12d8fb
-; CGP-NEXT:    v_cvt_f32_ubyte0_e32 v7, 0
+; CGP-NEXT:    v_mul_hi_u32 v7, v8, v0
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v9, v1
+; CGP-NEXT:    v_cndmask_b32_e64 v9, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v7
+; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v9, v7
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v6
+; CGP-NEXT:    v_mul_hi_u32 v9, v11, v0
+; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s7, v1, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v6, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
+; CGP-NEXT:    v_add_i32_e32 v6, vcc, v9, v6
+; CGP-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], s7, v6, v[1:2]
+; CGP-NEXT:    v_sub_i32_e32 v9, vcc, v8, v0
 ; CGP-NEXT:    v_subb_u32_e64 v10, s[4:5], v11, v6, vcc
 ; CGP-NEXT:    v_sub_i32_e64 v0, s[4:5], v11, v6
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v9, v5
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v9, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v1, 0, -1, s[4:5]
 ; CGP-NEXT:    v_mov_b32_e32 v6, s8
 ; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v10
 ; CGP-NEXT:    v_cndmask_b32_e64 v11, v6, v1, s[4:5]
 ; CGP-NEXT:    v_cvt_f32_u32_e32 v1, 0x12d8fb
+; CGP-NEXT:    v_cvt_f32_ubyte0_e32 v7, 0
 ; CGP-NEXT:    v_subbrev_u32_e32 v0, vcc, 0, v0, vcc
-; CGP-NEXT:    v_sub_i32_e32 v12, vcc, v9, v5
 ; CGP-NEXT:    v_mac_f32_e32 v1, 0x4f800000, v7
 ; CGP-NEXT:    v_rcp_iflag_f32_e32 v1, v1
+; CGP-NEXT:    v_sub_i32_e32 v12, vcc, v9, v4
 ; CGP-NEXT:    v_subbrev_u32_e32 v13, vcc, 0, v0, vcc
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v12, v5
 ; CGP-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v1
 ; CGP-NEXT:    v_mul_f32_e32 v1, 0x2f800000, v0
 ; CGP-NEXT:    v_trunc_f32_e32 v7, v1
 ; CGP-NEXT:    v_mac_f32_e32 v0, 0xcf800000, v7
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v14, v0
 ; CGP-NEXT:    v_cvt_u32_f32_e32 v15, v7
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v12, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
-; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v13
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v14, 0
+; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v13
 ; CGP-NEXT:    v_cndmask_b32_e32 v16, v6, v8, vcc
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v15, v[1:2]
-; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v12, v5
+; CGP-NEXT:    v_sub_i32_e32 v1, vcc, v12, v4
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], -1, v14, v[7:8]
 ; CGP-NEXT:    v_subbrev_u32_e32 v17, vcc, 0, v13, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v16
@@ -2177,12 +2171,13 @@ define <2 x i64> @v_srem_v2i64_oddk_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_addc_u32_e32 v14, vcc, v15, v1, vcc
 ; CGP-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], s6, v12, 0
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v11
-; CGP-NEXT:    v_cndmask_b32_e32 v9, v9, v8, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v7, v9, v8, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v9, v10, v13, vcc
+; CGP-NEXT:    v_xor_b32_e32 v10, v7, v5
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s6, v14, v[1:2]
-; CGP-NEXT:    v_xor_b32_e32 v1, v9, v4
+; CGP-NEXT:    v_xor_b32_e32 v1, v9, v5
 ; CGP-NEXT:    v_ashrrev_i32_e32 v9, 31, v3
 ; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], -1, v12, v[7:8]
-; CGP-NEXT:    v_cndmask_b32_e32 v10, v10, v13, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v2, v9
 ; CGP-NEXT:    v_addc_u32_e32 v3, vcc, v3, v9, vcc
 ; CGP-NEXT:    v_xor_b32_e32 v11, v2, v9
@@ -2208,58 +2203,55 @@ define <2 x i64> @v_srem_v2i64_oddk_denom(<2 x i64> %num) {
 ; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v7, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v12, v0
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v12, v0
 ; CGP-NEXT:    v_addc_u32_e32 v2, vcc, v14, v2, vcc
-; CGP-NEXT:    v_mul_lo_u32 v3, v13, v0
-; CGP-NEXT:    v_mul_lo_u32 v7, v11, v2
-; CGP-NEXT:    v_mul_hi_u32 v8, v11, v0
-; CGP-NEXT:    v_mul_hi_u32 v0, v13, v0
-; CGP-NEXT:    v_xor_b32_e32 v10, v10, v4
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v7
+; CGP-NEXT:    v_mul_lo_u32 v7, v13, v3
+; CGP-NEXT:    v_mul_lo_u32 v8, v11, v2
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v10, v5
+; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; CGP-NEXT:    v_mul_hi_u32 v5, v11, v3
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v7, v8
+; CGP-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_mul_lo_u32 v7, v13, v2
+; CGP-NEXT:    v_mul_hi_u32 v3, v13, v3
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CGP-NEXT:    v_mul_hi_u32 v8, v11, v2
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v7, v3
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
 ; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v8
-; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, vcc
-; CGP-NEXT:    v_mul_lo_u32 v8, v13, v2
-; CGP-NEXT:    v_add_i32_e32 v3, vcc, v7, v3
-; CGP-NEXT:    v_mul_hi_u32 v7, v11, v2
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v8, v0
 ; CGP-NEXT:    v_cndmask_b32_e64 v8, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v0, v7
-; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v7, vcc, v8, v7
-; CGP-NEXT:    v_add_i32_e32 v12, vcc, v0, v3
+; CGP-NEXT:    v_add_i32_e32 v7, vcc, v7, v8
+; CGP-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; CGP-NEXT:    v_mul_hi_u32 v8, v13, v2
-; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v12, 0
-; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v7, v0
-; CGP-NEXT:    v_add_i32_e32 v7, vcc, v8, v0
-; CGP-NEXT:    v_mov_b32_e32 v0, v3
-; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s7, v7, v[0:1]
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v1, v4
-; CGP-NEXT:    v_subb_u32_e32 v1, vcc, v10, v4, vcc
-; CGP-NEXT:    v_mad_u64_u32 v[3:4], s[4:5], 0, v12, v[7:8]
+; CGP-NEXT:    v_mad_u64_u32 v[2:3], s[4:5], s7, v3, 0
+; CGP-NEXT:    v_cndmask_b32_e64 v5, 0, 1, vcc
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v7, v5
+; CGP-NEXT:    v_add_i32_e32 v5, vcc, v8, v5
+; CGP-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], s7, v5, v[3:4]
 ; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v11, v2
-; CGP-NEXT:    v_subb_u32_e64 v4, s[4:5], v13, v3, vcc
-; CGP-NEXT:    v_sub_i32_e64 v3, s[4:5], v13, v3
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; CGP-NEXT:    v_sub_i32_e32 v8, vcc, v2, v5
-; CGP-NEXT:    v_subbrev_u32_e32 v3, vcc, 0, v3, vcc
-; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v5
-; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v5
+; CGP-NEXT:    v_sub_i32_e64 v5, s[4:5], v13, v7
+; CGP-NEXT:    v_subb_u32_e64 v3, s[4:5], v13, v7, vcc
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; CGP-NEXT:    v_sub_i32_e32 v8, vcc, v2, v4
+; CGP-NEXT:    v_subbrev_u32_e32 v5, vcc, 0, v5, vcc
+; CGP-NEXT:    v_cmp_ge_u32_e64 s[4:5], v2, v4
+; CGP-NEXT:    v_cmp_ge_u32_e32 vcc, v8, v4
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[4:5]
-; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v4
+; CGP-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v3
 ; CGP-NEXT:    v_cndmask_b32_e64 v10, 0, -1, vcc
-; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v3
+; CGP-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v5
 ; CGP-NEXT:    v_cndmask_b32_e64 v7, v6, v7, s[4:5]
 ; CGP-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
-; CGP-NEXT:    v_sub_i32_e32 v5, vcc, v8, v5
-; CGP-NEXT:    v_subbrev_u32_e32 v10, vcc, 0, v3, vcc
+; CGP-NEXT:    v_sub_i32_e32 v4, vcc, v8, v4
+; CGP-NEXT:    v_subbrev_u32_e32 v10, vcc, 0, v5, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v6
-; CGP-NEXT:    v_cndmask_b32_e32 v5, v8, v5, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v3, v10, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v4, v8, v4, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v5, v5, v10, vcc
 ; CGP-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v7
-; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v5, vcc
-; CGP-NEXT:    v_cndmask_b32_e32 v3, v4, v3, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
+; CGP-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
 ; CGP-NEXT:    v_xor_b32_e32 v2, v2, v9
 ; CGP-NEXT:    v_xor_b32_e32 v3, v3, v9
 ; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v9

diff  --git a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
index 8e13616512ea6..b829df42dc401 100644
--- a/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
+++ b/llvm/unittests/CodeGen/GlobalISel/LegalizerTest.cpp
@@ -8,6 +8,7 @@
 
 #include "llvm/CodeGen/GlobalISel/Legalizer.h"
 #include "GISelMITest.h"
+#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
 #include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
 
 #define DEBUG_TYPE "legalizer-test"
@@ -65,9 +66,10 @@ TEST_F(AArch64GISelMITest, BasicLegalizerTest) {
 
   ALegalizerInfo LI(MF->getSubtarget());
   LostDebugLocObserver LocObserver(DEBUG_TYPE);
+  GISelKnownBits KB(*MF);
 
   Legalizer::MFResult Result = Legalizer::legalizeMachineFunction(
-      *MF, LI, {&LocObserver}, LocObserver, B);
+      *MF, LI, {&LocObserver}, LocObserver, B, &KB);
 
   EXPECT_TRUE(isNullMIPtr(Result.FailedOn));
   EXPECT_TRUE(Result.Changed);
@@ -102,6 +104,7 @@ TEST_F(AArch64GISelMITest, UnorderedArtifactCombiningTest) {
 
   ALegalizerInfo LI(MF->getSubtarget());
   LostDebugLocObserver LocObserver(DEBUG_TYPE);
+  GISelKnownBits KB(*MF);
 
   // The events here unfold as follows:
   // 1. First, the function is scanned pre-forming the worklist of artifacts:
@@ -158,7 +161,7 @@ TEST_F(AArch64GISelMITest, UnorderedArtifactCombiningTest) {
   //  the process follows def-use chains, making them shorter at each step, thus
   //  combining everything that can be combined in O(n) time.
   Legalizer::MFResult Result = Legalizer::legalizeMachineFunction(
-      *MF, LI, {&LocObserver}, LocObserver, B);
+      *MF, LI, {&LocObserver}, LocObserver, B, &KB);
 
   EXPECT_TRUE(isNullMIPtr(Result.FailedOn));
   EXPECT_TRUE(Result.Changed);
@@ -195,9 +198,10 @@ TEST_F(AArch64GISelMITest, UnorderedArtifactCombiningManyCopiesTest) {
 
   ALegalizerInfo LI(MF->getSubtarget());
   LostDebugLocObserver LocObserver(DEBUG_TYPE);
+  GISelKnownBits KB(*MF);
 
   Legalizer::MFResult Result = Legalizer::legalizeMachineFunction(
-      *MF, LI, {&LocObserver}, LocObserver, B);
+      *MF, LI, {&LocObserver}, LocObserver, B, &KB);
 
   EXPECT_TRUE(isNullMIPtr(Result.FailedOn));
   EXPECT_TRUE(Result.Changed);


        


More information about the llvm-commits mailing list