[llvm] 72e8754 - [AMDGPU] Disable 'Skip Uniform Regions' optimization by default for AMDGPU.

Konstantin Pyzhov via llvm-commits <llvm-commits@lists.llvm.org>
Mon Apr 6 14:02:49 PDT 2020


Author: Konstantin Pyzhov
Date: 2020-04-06T09:05:58-04:00
New Revision: 72e8754916f5f003a49477adc9694d3465b44414

URL: https://github.com/llvm/llvm-project/commit/72e8754916f5f003a49477adc9694d3465b44414
DIFF: https://github.com/llvm/llvm-project/commit/72e8754916f5f003a49477adc9694d3465b44414.diff

LOG: [AMDGPU] Disable 'Skip Uniform Regions' optimization by default for AMDGPU.

Reviewers: sameerds, dstuttard

Differential Revision: https://reviews.llvm.org/D77228

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
    llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll
    llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
    llvm/test/CodeGen/AMDGPU/branch-uniformity.ll
    llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
    llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
    llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
    llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
    llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
    llvm/test/CodeGen/AMDGPU/early-if-convert.ll
    llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll
    llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
    llvm/test/CodeGen/AMDGPU/infinite-loop.ll
    llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
    llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
    llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
    llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/setcc.ll
    llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
    llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
    llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
    llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
    llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll
    llvm/test/CodeGen/AMDGPU/valu-i1.ll
    llvm/test/CodeGen/AMDGPU/wqm.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 17c48a96cd5e..f85c088fd53f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -866,7 +866,7 @@ bool GCNPassConfig::addPreISel() {
     if (EnableStructurizerWorkarounds) {
       addPass(createUnifyLoopExitsPass());
     }
-    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
+    addPass(createStructurizeCFGPass(false)); // true -> SkipUniformRegions
   }
   addPass(createSinkingPass());
   addPass(createAMDGPUAnnotateUniformValues());
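
For context: the functional change is just this flipped boolean; everything
below is regenerated test expectations. A rough sketch of how StructurizeCFG
consumes the flag (illustrative only, simplified from the actual pass;
hasOnlyUniformBranches and structurizeRegion here paraphrase the real
uniformity check and rewrite step, they are not verbatim LLVM source):

    // Illustrative sketch, not verbatim LLVM source.
    class StructurizeCFG : public RegionPass {
      bool SkipUniformRegions; // set from createStructurizeCFGPass(bool)

    public:
      bool runOnRegion(Region *R, RGPassManager &RGM) override {
        // Old AMDGPU default (true): regions whose branches are all
        // uniform were left unstructurized and lowered as scalar branches.
        if (SkipUniformRegions && hasOnlyUniformBranches(R))
          return false;
        // New default (false): every region is structurized, which is
        // what introduces the %Flow blocks seen throughout the tests.
        return structurizeRegion(R);
      }
    };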

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
index 3f18877acd94..0be830b3965a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/divergent-control-flow.ll
@@ -136,9 +136,10 @@ define void @constrained_if_register_class() {
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
 ; CHECK-NEXT:    s_cmp_lg_u32 s4, 0
 ; CHECK-NEXT:    s_cselect_b32 s4, 1, 0
+; CHECK-NEXT:    s_xor_b32 s4, s4, 1
 ; CHECK-NEXT:    s_and_b32 s4, s4, 1
 ; CHECK-NEXT:    s_cmp_lg_u32 s4, 0
-; CHECK-NEXT:    s_cbranch_scc1 BB4_6
+; CHECK-NEXT:    s_cbranch_scc0 BB4_6
 ; CHECK-NEXT:  ; %bb.1: ; %bb2
 ; CHECK-NEXT:    s_getpc_b64 s[6:7]
 ; CHECK-NEXT:    s_add_u32 s6, s6, const.ptr@gotpcrel32@lo+4
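
This hunk is typical of the fallout: a uniform diamond that used to lower to a
single scalar branch is now structurized, which inverts the condition (the new
s_xor_b32) and funnels control through a Flow block (visible as "%Flow" labels
in the tests below). A minimal hand-written IR sketch of that shape; block and
value names are illustrative, not actual pass output:

    define amdgpu_kernel void @uniform_diamond(i1 %cond) {
    entry:
      %inv = xor i1 %cond, true      ; inverted test, cf. the added s_xor_b32
      br i1 %inv, label %bb1, label %Flow

    bb1:                             ; one side of the diamond runs first
      br label %Flow

    Flow:                            ; phi decides whether the other side runs
      %run.bb0 = phi i1 [ false, %bb1 ], [ true, %entry ]
      br i1 %run.bb0, label %bb0, label %bb2

    bb0:
      br label %bb2

    bb2:
      ret void
    }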

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
index 9238f1e08872..19471deb887f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/localizer.ll
@@ -7,38 +7,45 @@
 define amdgpu_kernel void @localize_constants(i1 %cond) {
 ; GFX9-LABEL: localize_constants:
 ; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dword s0, s[4:5], 0x0
+; GFX9-NEXT:    s_load_dword s1, s[4:5], 0x0
+; GFX9-NEXT:    s_mov_b32 s0, 1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_and_b32 s0, s0, 1
-; GFX9-NEXT:    s_cmp_lg_u32 s0, 0
+; GFX9-NEXT:    s_xor_b32 s1, s1, 1
+; GFX9-NEXT:    s_and_b32 s1, s1, 1
+; GFX9-NEXT:    s_cmp_lg_u32 s1, 0
 ; GFX9-NEXT:    s_cbranch_scc0 BB0_2
-; GFX9-NEXT:  ; %bb.1: ; %bb0
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x7b
-; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x1c8
+; GFX9-NEXT:  ; %bb.1: ; %bb1
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x5be6
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3e7
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x1c7
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3e8
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x1c7
-; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x5be6
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x1c8
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    s_endpgm
-; GFX9-NEXT:  BB0_2: ; %bb1
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x5be6
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3e7
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x1c7
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX9-NEXT:    s_mov_b32 s0, 0
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3e8
+; GFX9-NEXT:  BB0_2: ; %Flow
+; GFX9-NEXT:    s_and_b32 s0, s0, 1
+; GFX9-NEXT:    s_cmp_lg_u32 s0, 0
+; GFX9-NEXT:    s_cbranch_scc0 BB0_4
+; GFX9-NEXT:  ; %bb.3: ; %bb0
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x7b
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x1c8
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3e7
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
-; GFX9-NEXT:    v_mov_b32_e32 v0, 0x7b
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x3e8
 ; GFX9-NEXT:    global_store_dword v[0:1], v0, off
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x1c7
+; GFX9-NEXT:    global_store_dword v[0:1], v0, off
+; GFX9-NEXT:    v_mov_b32_e32 v0, 0x5be6
+; GFX9-NEXT:    global_store_dword v[0:1], v0, off
+; GFX9-NEXT:  BB0_4: ; %bb2
 ; GFX9-NEXT:    s_endpgm
 entry:
   br i1 %cond, label %bb0, label %bb1
@@ -75,31 +82,46 @@ bb2:
 define amdgpu_kernel void @localize_globals(i1 %cond) {
 ; GFX9-LABEL: localize_globals:
 ; GFX9:       ; %bb.0: ; %entry
-; GFX9-NEXT:    s_load_dword s0, s[4:5], 0x0
+; GFX9-NEXT:    s_load_dword s1, s[4:5], 0x0
+; GFX9-NEXT:    s_mov_b32 s0, 1
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    s_xor_b32 s1, s1, 1
+; GFX9-NEXT:    s_and_b32 s1, s1, 1
+; GFX9-NEXT:    s_cmp_lg_u32 s1, 0
+; GFX9-NEXT:    s_cbranch_scc0 BB1_2
+; GFX9-NEXT:  ; %bb.1: ; %bb1
+; GFX9-NEXT:    s_getpc_b64 s[2:3]
+; GFX9-NEXT:    s_add_u32 s2, s2, gv2@gotpcrel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s3, s3, gv2@gotpcrel32@hi+4
+; GFX9-NEXT:    s_getpc_b64 s[4:5]
+; GFX9-NEXT:    s_add_u32 s4, s4, gv3@gotpcrel32@lo+4
+; GFX9-NEXT:    s_addc_u32 s5, s5, gv3@gotpcrel32@hi+4
+; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[4:5], 0x0
+; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_mov_b32 s0, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
+; GFX9-NEXT:    v_mov_b32_e32 v0, s2
+; GFX9-NEXT:    v_mov_b32_e32 v1, s3
+; GFX9-NEXT:    global_store_dword v[0:1], v2, off
+; GFX9-NEXT:    v_mov_b32_e32 v0, s4
+; GFX9-NEXT:    v_mov_b32_e32 v2, 1
+; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    global_store_dword v[0:1], v2, off
+; GFX9-NEXT:  BB1_2: ; %Flow
 ; GFX9-NEXT:    s_and_b32 s0, s0, 1
 ; GFX9-NEXT:    s_cmp_lg_u32 s0, 0
-; GFX9-NEXT:    s_cbranch_scc0 BB1_2
-; GFX9-NEXT:  ; %bb.1: ; %bb0
+; GFX9-NEXT:    s_cbranch_scc0 BB1_4
+; GFX9-NEXT:  ; %bb.3: ; %bb0
 ; GFX9-NEXT:    s_getpc_b64 s[0:1]
 ; GFX9-NEXT:    s_add_u32 s0, s0, gv0@gotpcrel32@lo+4
 ; GFX9-NEXT:    s_addc_u32 s1, s1, gv0@gotpcrel32@hi+4
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
 ; GFX9-NEXT:    s_getpc_b64 s[2:3]
 ; GFX9-NEXT:    s_add_u32 s2, s2, gv1@gotpcrel32@lo+4
 ; GFX9-NEXT:    s_addc_u32 s3, s3, gv1@gotpcrel32@hi+4
-; GFX9-NEXT:    s_branch BB1_3
-; GFX9-NEXT:  BB1_2: ; %bb1
-; GFX9-NEXT:    s_getpc_b64 s[0:1]
-; GFX9-NEXT:    s_add_u32 s0, s0, gv2@gotpcrel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s1, s1, gv2@gotpcrel32@hi+4
-; GFX9-NEXT:    v_mov_b32_e32 v2, 0
-; GFX9-NEXT:    s_getpc_b64 s[2:3]
-; GFX9-NEXT:    s_add_u32 s2, s2, gv3@gotpcrel32@lo+4
-; GFX9-NEXT:    s_addc_u32 s3, s3, gv3@gotpcrel32@hi+4
-; GFX9-NEXT:  BB1_3: ; %bb2
-; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x0
 ; GFX9-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
+; GFX9-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v3, 1
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s0
@@ -108,6 +130,7 @@ define amdgpu_kernel void @localize_globals(i1 %cond) {
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX9-NEXT:    global_store_dword v[0:1], v3, off
+; GFX9-NEXT:  BB1_4: ; %bb2
 ; GFX9-NEXT:    s_endpgm
 entry:
   br i1 %cond, label %bb0, label %bb1

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
index 51cc92525174..5e619cb4f47f 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/udiv.i64.ll
@@ -193,34 +193,14 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
 ; CHECK-NEXT:    s_mov_b32 s6, 0
 ; CHECK-NEXT:    s_mov_b32 s7, -1
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
-; CHECK-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; CHECK-NEXT:    v_cmp_ne_u64_e64 vcc, s[4:5], 0
-; CHECK-NEXT:    s_cbranch_vccnz BB1_2
+; CHECK-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[4:5], 0
+; CHECK-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, 1
+; CHECK-NEXT:    s_xor_b64 vcc, s[4:5], s[6:7]
+; CHECK-NEXT:    s_mov_b32 s4, 1
+; CHECK-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT:    s_cbranch_vccz BB1_2
 ; CHECK-NEXT:  ; %bb.1:
-; CHECK-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; CHECK-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; CHECK-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; CHECK-NEXT:    v_mul_lo_u32 v1, v0, s2
-; CHECK-NEXT:    v_mul_hi_u32 v2, v0, s2
-; CHECK-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
-; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; CHECK-NEXT:    v_mul_hi_u32 v1, v1, v0
-; CHECK-NEXT:    v_add_i32_e64 v2, s[4:5], v0, v1
-; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v0, v1
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; CHECK-NEXT:    v_mul_hi_u32 v0, v0, s0
-; CHECK-NEXT:    v_mul_lo_u32 v1, v0, s2
-; CHECK-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
-; CHECK-NEXT:    v_subrev_i32_e32 v3, vcc, 1, v0
-; CHECK-NEXT:    v_sub_i32_e32 v4, vcc, s0, v1
-; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, s0, v1
-; CHECK-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v4
-; CHECK-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
-; CHECK-NEXT:    s_branch BB1_3
-; CHECK-NEXT:  BB1_2:
+; CHECK-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; CHECK-NEXT:    v_mov_b32_e32 v1, s3
 ; CHECK-NEXT:    v_cvt_f32_u32_e32 v2, s3
 ; CHECK-NEXT:    s_sub_u32 s6, 0, s2
@@ -328,11 +308,11 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
 ; CHECK-NEXT:    v_add_i32_e32 v2, vcc, v2, v7
 ; CHECK-NEXT:    v_sub_i32_e32 v5, vcc, s0, v5
 ; CHECK-NEXT:    v_subb_u32_e64 v3, s[4:5], v3, v2, vcc
-; CHECK-NEXT:    v_sub_i32_e64 v2, s[0:1], s1, v2
-; CHECK-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v5
-; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[0:1]
-; CHECK-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[0:1]
+; CHECK-NEXT:    v_sub_i32_e64 v2, s[4:5], s1, v2
+; CHECK-NEXT:    v_cmp_le_u32_e64 s[4:5], s2, v5
+; CHECK-NEXT:    v_cndmask_b32_e64 v6, 0, -1, s[4:5]
+; CHECK-NEXT:    v_cmp_le_u32_e64 s[4:5], s3, v3
+; CHECK-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v3
 ; CHECK-NEXT:    v_cndmask_b32_e32 v2, v7, v6, vcc
@@ -348,7 +328,36 @@ define amdgpu_ps i64 @s_udiv_i64(i64 inreg %num, i64 inreg %den) {
 ; CHECK-NEXT:    v_cndmask_b32_e32 v1, v8, v4, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
-; CHECK-NEXT:  BB1_3:
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  BB1_2: ; %Flow
+; CHECK-NEXT:    s_and_b32 s1, s4, 1
+; CHECK-NEXT:    s_cmp_lg_u32 s1, 0
+; CHECK-NEXT:    s_cbranch_scc0 BB1_4
+; CHECK-NEXT:  ; %bb.3:
+; CHECK-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; CHECK-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; CHECK-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
+; CHECK-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; CHECK-NEXT:    v_mul_lo_u32 v1, v0, s2
+; CHECK-NEXT:    v_mul_hi_u32 v2, v0, s2
+; CHECK-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; CHECK-NEXT:    v_mul_hi_u32 v1, v1, v0
+; CHECK-NEXT:    v_add_i32_e64 v2, s[4:5], v0, v1
+; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v0, v1
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; CHECK-NEXT:    v_mul_hi_u32 v0, v0, s0
+; CHECK-NEXT:    v_mul_lo_u32 v1, v0, s2
+; CHECK-NEXT:    v_add_i32_e32 v2, vcc, 1, v0
+; CHECK-NEXT:    v_subrev_i32_e32 v3, vcc, 1, v0
+; CHECK-NEXT:    v_sub_i32_e32 v4, vcc, s0, v1
+; CHECK-NEXT:    v_cmp_ge_u32_e32 vcc, s0, v1
+; CHECK-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v4
+; CHECK-NEXT:    s_and_b64 s[0:1], s[0:1], vcc
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v3, v0, vcc
+; CHECK-NEXT:  BB1_4:
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    ; return to shader part epilog

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
index 78f03210fb88..4253067492eb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/urem.i64.ll
@@ -192,34 +192,14 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) {
 ; CHECK-NEXT:    s_mov_b32 s6, 0
 ; CHECK-NEXT:    s_mov_b32 s7, -1
 ; CHECK-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
-; CHECK-NEXT:    v_cvt_f32_u32_e32 v0, s2
-; CHECK-NEXT:    v_cmp_ne_u64_e64 vcc, s[4:5], 0
-; CHECK-NEXT:    s_cbranch_vccnz BB1_2
+; CHECK-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[4:5], 0
+; CHECK-NEXT:    v_cmp_ne_u32_e64 s[6:7], 0, 1
+; CHECK-NEXT:    s_xor_b64 vcc, s[4:5], s[6:7]
+; CHECK-NEXT:    s_mov_b32 s4, 1
+; CHECK-NEXT:    ; implicit-def: $vgpr0_vgpr1
+; CHECK-NEXT:    s_cbranch_vccz BB1_2
 ; CHECK-NEXT:  ; %bb.1:
-; CHECK-NEXT:    v_rcp_iflag_f32_e32 v0, v0
-; CHECK-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
-; CHECK-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; CHECK-NEXT:    v_mul_lo_u32 v1, v0, s2
-; CHECK-NEXT:    v_mul_hi_u32 v2, v0, s2
-; CHECK-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
-; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
-; CHECK-NEXT:    v_mul_hi_u32 v1, v1, v0
-; CHECK-NEXT:    v_add_i32_e64 v2, s[4:5], v0, v1
-; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v0, v1
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; CHECK-NEXT:    v_mul_hi_u32 v0, v0, s0
-; CHECK-NEXT:    v_mul_lo_u32 v0, v0, s2
-; CHECK-NEXT:    v_sub_i32_e32 v1, vcc, s0, v0
-; CHECK-NEXT:    v_cmp_le_u32_e32 vcc, s2, v1
-; CHECK-NEXT:    v_add_i32_e64 v2, s[4:5], s2, v1
-; CHECK-NEXT:    v_cmp_ge_u32_e64 s[0:1], s0, v0
-; CHECK-NEXT:    v_subrev_i32_e64 v0, s[2:3], s2, v1
-; CHECK-NEXT:    s_and_b64 vcc, vcc, s[0:1]
-; CHECK-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
-; CHECK-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
-; CHECK-NEXT:    s_branch BB1_3
-; CHECK-NEXT:  BB1_2:
+; CHECK-NEXT:    v_cvt_f32_u32_e32 v0, s2
 ; CHECK-NEXT:    v_mov_b32_e32 v1, s3
 ; CHECK-NEXT:    v_cvt_f32_u32_e32 v2, s3
 ; CHECK-NEXT:    s_sub_u32 s6, 0, s2
@@ -325,11 +305,11 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) {
 ; CHECK-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
 ; CHECK-NEXT:    v_sub_i32_e32 v2, vcc, s0, v5
 ; CHECK-NEXT:    v_subb_u32_e64 v3, s[4:5], v3, v0, vcc
-; CHECK-NEXT:    v_sub_i32_e64 v0, s[0:1], s1, v0
-; CHECK-NEXT:    v_cmp_le_u32_e64 s[0:1], s2, v2
-; CHECK-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[0:1]
-; CHECK-NEXT:    v_cmp_le_u32_e64 s[0:1], s3, v3
-; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[0:1]
+; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], s1, v0
+; CHECK-NEXT:    v_cmp_le_u32_e64 s[4:5], s2, v2
+; CHECK-NEXT:    v_cndmask_b32_e64 v4, 0, -1, s[4:5]
+; CHECK-NEXT:    v_cmp_le_u32_e64 s[4:5], s3, v3
+; CHECK-NEXT:    v_cndmask_b32_e64 v5, 0, -1, s[4:5]
 ; CHECK-NEXT:    v_subb_u32_e32 v0, vcc, v0, v1, vcc
 ; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, s3, v3
 ; CHECK-NEXT:    v_cndmask_b32_e32 v1, v5, v4, vcc
@@ -346,7 +326,36 @@ define amdgpu_ps i64 @s_urem_i64(i64 inreg %num, i64 inreg %den) {
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v3, v5, vcc
 ; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
 ; CHECK-NEXT:    v_cndmask_b32_e32 v0, v2, v0, vcc
-; CHECK-NEXT:  BB1_3:
+; CHECK-NEXT:    s_mov_b32 s4, 0
+; CHECK-NEXT:  BB1_2: ; %Flow
+; CHECK-NEXT:    s_and_b32 s1, s4, 1
+; CHECK-NEXT:    s_cmp_lg_u32 s1, 0
+; CHECK-NEXT:    s_cbranch_scc0 BB1_4
+; CHECK-NEXT:  ; %bb.3:
+; CHECK-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; CHECK-NEXT:    v_rcp_iflag_f32_e32 v0, v0
+; CHECK-NEXT:    v_mul_f32_e32 v0, 0x4f800000, v0
+; CHECK-NEXT:    v_cvt_u32_f32_e32 v0, v0
+; CHECK-NEXT:    v_mul_lo_u32 v1, v0, s2
+; CHECK-NEXT:    v_mul_hi_u32 v2, v0, s2
+; CHECK-NEXT:    v_sub_i32_e32 v3, vcc, 0, v1
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; CHECK-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
+; CHECK-NEXT:    v_mul_hi_u32 v1, v1, v0
+; CHECK-NEXT:    v_add_i32_e64 v2, s[4:5], v0, v1
+; CHECK-NEXT:    v_sub_i32_e64 v0, s[4:5], v0, v1
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
+; CHECK-NEXT:    v_mul_hi_u32 v0, v0, s0
+; CHECK-NEXT:    v_mul_lo_u32 v0, v0, s2
+; CHECK-NEXT:    v_sub_i32_e32 v1, vcc, s0, v0
+; CHECK-NEXT:    v_cmp_le_u32_e32 vcc, s2, v1
+; CHECK-NEXT:    v_add_i32_e64 v2, s[4:5], s2, v1
+; CHECK-NEXT:    v_cmp_ge_u32_e64 s[0:1], s0, v0
+; CHECK-NEXT:    v_subrev_i32_e64 v0, s[2:3], s2, v1
+; CHECK-NEXT:    s_and_b64 vcc, vcc, s[0:1]
+; CHECK-NEXT:    v_cndmask_b32_e32 v0, v1, v0, vcc
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, v2, v0, s[0:1]
+; CHECK-NEXT:  BB1_4:
 ; CHECK-NEXT:    v_readfirstlane_b32 s0, v0
 ; CHECK-NEXT:    s_mov_b32 s1, s0
 ; CHECK-NEXT:    ; return to shader part epilog

diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll
index 049e7533f76a..f9b705037adf 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation-debug-info.ll
@@ -7,11 +7,11 @@ declare void @llvm.dbg.value(metadata, metadata, metadata) #0
 
 define amdgpu_kernel void @long_branch_dbg_value(float addrspace(1)* nocapture %arg, float %arg1) #1 !dbg !5 {
 ; GCN-LABEL: long_branch_dbg_value:
-; GCN:  BB0_4: ; %bb
+; GCN:  BB0_5: ; %bb
 ; GCN-NEXT:    ;DEBUG_VALUE: test_debug_value:globalptr_arg <- [DW_OP_plus_uconst 12, DW_OP_stack_value]
 ; GCN-NEXT:    .loc 1 0 42 is_stmt 0 ; /tmp/test_debug_value.cl:0:42
 ; GCN-NEXT:    s_getpc_b64 s{{\[}}[[PC_LO:[0-9]+]]:[[PC_HI:[0-9]+]]{{\]}}
-; GCN-NEXT:    s_add_u32 s[[PC_LO]], s[[PC_LO]], BB0_3-(BB0_4+4)
+; GCN-NEXT:    s_add_u32 s[[PC_LO]], s[[PC_LO]], BB0_4-(BB0_5+4)
 ; GCN-NEXT:    s_addc_u32 s[[PC_HI]], s[[PC_HI]], 0
 ; GCN-NEXT:    s_setpc_b64
 bb:

diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
index e08b3c50cdae..3c7ad44e0b22 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -224,7 +224,7 @@ bb3:
 
 ; GCN-LABEL: {{^}}uniform_unconditional_min_long_forward_branch:
 ; GCN: s_cmp_eq_u32
-; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]+_[0-9]+]]
+; GCN: s_cbranch_scc{{[0-1]}} [[BB2:BB[0-9]+_[0-9]+]]
 
 ; GCN-NEXT: [[LONG_JUMP0:BB[0-9]+_[0-9]+]]: ; %bb0
 ; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
@@ -232,24 +232,17 @@ bb3:
 ; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], 0{{$}}
 ; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC0_LO]]:[[PC0_HI]]{{\]}}
 
-; GCN-NEXT: [[BB2]]: ; %bb2
-; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
-; GCN: buffer_store_dword [[BB2_K]]
-
-; GCN-NEXT: [[LONG_JUMP1:BB[0-9]+_[0-9]+]]: ; %bb2
-; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]{{\]}}
-; GCN-NEXT: s_add_u32 s[[PC1_LO]], s[[PC1_LO]], [[BB4:BB[0-9]_[0-9]+]]-([[LONG_JUMP1]]+4)
-; GCN-NEXT: s_addc_u32 s[[PC1_HI]], s[[PC1_HI]], 0{{$}}
-; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC1_LO]]:[[PC1_HI]]{{\]}}
-
-; GCN: [[BB3]]: ; %bb3
+; GCN: [[BB2]]: ; %bb3
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: ;;#ASMEND
 
-; GCN-NEXT: [[BB4]]: ; %bb4
+; GCN: [[BB3]]:
+; GCN: v_mov_b32_e32 [[BB2_K:v[0-9]+]], 17
+; GCN: buffer_store_dword [[BB2_K]]
+
 ; GCN: v_mov_b32_e32 [[BB4_K:v[0-9]+]], 63
 ; GCN: buffer_store_dword [[BB4_K]]
 ; GCN-NEXT: s_endpgm
@@ -296,7 +289,7 @@ bb4:
 ; GCN-NEXT: s_sub_u32 s[[PC_LO]], s[[PC_LO]], ([[LONGBB]]+4)-[[LOOP]]
 ; GCN-NEXT: s_subb_u32 s[[PC_HI]], s[[PC_HI]], 0{{$}}
 ; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC_LO]]:[[PC_HI]]{{\]}}
-; GCN-NEXT: .Lfunc_end{{[0-9]+}}:
+; GCN-NEXT .Lfunc_end{{[0-9]+}}:
 define amdgpu_kernel void @uniform_unconditional_min_long_backward_branch(i32 addrspace(1)* %arg, i32 %arg1) {
 entry:
   br label %loop
@@ -317,23 +310,15 @@ loop:
 ; GCN-LABEL: {{^}}expand_requires_expand:
 ; GCN-NEXT: ; %bb.0: ; %bb0
 ; GCN: s_load_dword
-; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 0{{$}}
-; GCN-NEXT: s_cbranch_scc0 [[BB1:BB[0-9]+_[0-9]+]]
-
-; GCN-NEXT: [[LONGBB0:BB[0-9]+_[0-9]+]]: ; %bb0
+; GCN: {{s|v}}_cmp_lt_i32
+; GCN: s_cbranch
 
-; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC0_LO:[0-9]+]]:[[PC0_HI:[0-9]+]]{{\]}}
-; GCN-NEXT: s_add_u32 s[[PC0_LO]], s[[PC0_LO]], [[BB2:BB[0-9]_[0-9]+]]-([[LONGBB0]]+4)
-; GCN-NEXT: s_addc_u32 s[[PC0_HI]], s[[PC0_HI]], 0{{$}}
-; GCN-NEXT: s_setpc_b64 s{{\[}}[[PC0_LO]]:[[PC0_HI]]{{\]}}
-
-; GCN-NEXT: [[BB1]]: ; %bb1
-; GCN-NEXT: s_load_dword
+; GCN: s_load_dword
 ; GCN-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-NEXT: s_cmp_eq_u32 s{{[0-9]+}}, 3{{$}}
-; GCN-NEXT: s_cbranch_scc0 [[BB2:BB[0-9]_[0-9]+]]
+; GCN-NEXT: v_cmp_{{eq|ne}}_u32_e64
+; GCN: s_cbranch_vccz [[BB2:BB[0-9]_[0-9]+]]
 
-; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]: ; %bb1
+; GCN-NEXT: [[LONGBB1:BB[0-9]+_[0-9]+]]:
 ; GCN-NEXT: s_getpc_b64 s{{\[}}[[PC1_LO:[0-9]+]]:[[PC1_HI:[0-9]+]]{{\]}}
 ; GCN-NEXT: s_add_u32 s[[PC1_LO]], s[[PC1_LO]], [[BB3:BB[0-9]+_[0-9]+]]-([[LONGBB1]]+4)
 ; GCN-NEXT: s_addc_u32 s[[PC1_HI]], s[[PC1_HI]], 0{{$}}
@@ -451,7 +436,7 @@ endif:
 ; GCN: v_nop_e64
 ; GCN: v_nop_e64
 ; GCN: ;;#ASMEND
-; GCN: s_cbranch_vccz [[RET:BB[0-9]+_[0-9]+]]
+; GCN: s_cbranch_{{vccz|vccnz}} [[RET:BB[0-9]+_[0-9]+]]
 
 ; GCN-NEXT: [[LONGBB:BB[0-9]+_[0-9]+]]: ; %loop
 ; GCN-NEXT: ; in Loop: Header=[[LOOP_BODY]] Depth=1
@@ -491,7 +476,7 @@ ret:
 
 ; GCN-LABEL: {{^}}long_branch_hang:
 ; GCN: s_cmp_lt_i32 s{{[0-9]+}}, 6
-; GCN: s_cbranch_scc0 [[LONG_BR_0:BB[0-9]+_[0-9]+]]
+; GCN: s_cbranch_scc{{[0-1]}} [[LONG_BR_0:BB[0-9]+_[0-9]+]]
 ; GCN-NEXT: BB{{[0-9]+_[0-9]+}}:
 
 ; GCN: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, [[LONG_BR_DEST0:BB[0-9]+_[0-9]+]]-(
@@ -499,14 +484,14 @@ ret:
 ; GCN-NEXT: s_setpc_b64
 
 ; GCN-NEXT: [[LONG_BR_0]]:
-; GCN-DAG: v_cmp_lt_i32
-; GCN-DAG: v_cmp_gt_i32
-; GCN: s_cbranch_vccnz
-
-; GCN: s_setpc_b64
 ; GCN: s_setpc_b64
 
 ; GCN: [[LONG_BR_DEST0]]
+
+; GCN: s_cbranch_vccnz
+; GCN-DAG: v_cmp_lt_i32
+; GCN-DAG: v_cmp_ge_i32
+
 ; GCN: s_cbranch_vccz
 ; GCN: s_setpc_b64
 

diff --git a/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll b/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll
index c9c801fb1911..685b5f0dedad 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-uniformity.ll
@@ -8,8 +8,8 @@
 ;
 ; CHECK-LABEL: {{^}}main:
 ; CHECK: ; %LOOP49
-; CHECK: s_cmp_lg_u32 s{{[0-9]+}}, 0
-; CHECK: s_cbranch_scc1
+; CHECK: s_cmp_{{lg|eq}}_u32 s{{[0-9]+}}, 0
+; CHECK: s_cbranch_scc{{[0-1]}}
 ; CHECK: ; %ENDIF53
 define amdgpu_vs float @main(i32 %in) {
 main_body:

diff --git a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
index 03e7e74c7c28..fa7b6f9ead31 100644
--- a/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
+++ b/llvm/test/CodeGen/AMDGPU/cf-loop-on-constant.ll
@@ -102,7 +102,7 @@ for.body:
 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80
 ; GCN: s_add_i32 s{{[0-9]+}}, s{{[0-9]+}}, 4
 
-; GCN: s_cbranch_vccnz [[LOOPBB]]
+; GCN: s_cbranch_{{vccz|vccnz}} [[LOOPBB]]
 ; GCN-NEXT: ; %bb.2
 ; GCN-NEXT: s_endpgm
 define amdgpu_kernel void @loop_arg_0(float addrspace(3)* %ptr, i32 %n) nounwind {

diff --git a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
index f6c30f8af1c3..f60c50cec784 100644
--- a/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
+++ b/llvm/test/CodeGen/AMDGPU/cgp-bitfield-extract.ll
@@ -27,13 +27,12 @@
 
 ; GCN-LABEL: {{^}}sink_ubfe_i32:
 ; GCN-NOT: lshr
-; GCN: s_cbranch_scc1
+; GCN: s_cbranch_scc{{[0-1]}}
 
-; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80008
-; GCN: BB0_2:
 ; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70008
-
 ; GCN: BB0_3:
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80008
+
 ; GCN: buffer_store_dword
 ; GCN: s_endpgm
 define amdgpu_kernel void @sink_ubfe_i32(i32 addrspace(1)* %out, i32 %arg1) #0 {
@@ -122,16 +121,15 @@ ret:
 ; GCN-NOT: lshr
 ; VI: s_load_dword [[ARG:s[0-9]+]], s[0:1], 0x2c
 ; VI: s_bfe_u32 [[BFE:s[0-9]+]], [[ARG]], 0xc0004
-; GCN: s_cbranch_scc1
+; GCN: s_cbranch_scc{{[0-1]}}
 
-; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80004
-; VI: v_mov_b32_e32 v{{[0-9]+}}, 0xff
-
-; GCN: BB2_2:
 ; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70004
 ; VI: v_mov_b32_e32 v{{[0-9]+}}, 0x7f
 
 ; GCN: BB2_3:
+; SI: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80004
+; VI: v_mov_b32_e32 v{{[0-9]+}}, 0xff
+
 ; GCN: buffer_store_short
 ; GCN: s_endpgm
 define amdgpu_kernel void @sink_ubfe_i16(i16 addrspace(1)* %out, i16 %arg1) #0 {
@@ -177,14 +175,13 @@ ret:
 
 ; GCN-LABEL: {{^}}sink_ubfe_i64_span_midpoint:
 
+; GCN: s_cbranch_scc{{[0-1]}} BB3_2
 ; GCN: v_alignbit_b32 v[[LO:[0-9]+]], s{{[0-9]+}}, v{{[0-9]+}}, 30
-; GCN: s_cbranch_scc1 BB3_2
-; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]]
-
-; GCN: BB3_2:
 ; GCN: v_and_b32_e32 v{{[0-9]+}}, 0x7f, v[[LO]]
 
 ; GCN: BB3_3:
+; GCN: v_and_b32_e32 v{{[0-9]+}}, 0xff, v[[LO]]
+
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @sink_ubfe_i64_span_midpoint(i64 addrspace(1)* %out, i64 %arg1) #0 {
 entry:
@@ -226,14 +223,13 @@ ret:
 
 ; GCN-LABEL: {{^}}sink_ubfe_i64_low32:
 
-; GCN: s_cbranch_scc1 BB4_2
+; GCN: s_cbranch_scc{{[0-1]}} BB4_2
 
-; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x8000f
-
-; GCN: BB4_2:
 ; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x7000f
 
 ; GCN: BB4_3:
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x8000f
+
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @sink_ubfe_i64_low32(i64 addrspace(1)* %out, i64 %arg1) #0 {
 entry:
@@ -274,13 +270,12 @@ ret:
 ; OPT: ret
 
 ; GCN-LABEL: {{^}}sink_ubfe_i64_high32:
-; GCN: s_cbranch_scc1 BB5_2
-; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80003
-
-; GCN: BB5_2:
+; GCN: s_cbranch_scc{{[0-1]}} BB5_2
 ; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x70003
 
 ; GCN: BB5_3:
+; GCN: s_bfe_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80003
+
 ; GCN: buffer_store_dwordx2
 define amdgpu_kernel void @sink_ubfe_i64_high32(i64 addrspace(1)* %out, i64 %arg1) #0 {
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll b/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
index 2c1074ae62fc..2268b4675971 100644
--- a/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/control-flow-fastregalloc.ll
@@ -89,7 +89,7 @@ endif:
 }
 
 ; GCN-LABEL: {{^}}divergent_loop:
-; VGPR: workitem_private_segment_byte_size = 12{{$}}
+; VGPR: workitem_private_segment_byte_size = 16{{$}}
 
 ; GCN: {{^}}; %bb.0:
 
@@ -123,9 +123,10 @@ endif:
 ; GCN: buffer_load_dword v[[VAL_LOOP_RELOAD:[0-9]+]], off, s[0:3], 0 offset:[[LOAD0_OFFSET]] ; 4-byte Folded Reload
 ; GCN: v_subrev_i32_e32 [[VAL_LOOP:v[0-9]+]], vcc, v{{[0-9]+}}, v[[VAL_LOOP_RELOAD]]
 ; GCN: s_cmp_lg_u32
-; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], 0 offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
+; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], 0 offset:{{[0-9]+}} ; 4-byte Folded Spill
 ; GCN-NEXT: s_cbranch_scc1 [[LOOP]]
 
+; GCN: buffer_store_dword [[VAL_LOOP]], off, s[0:3], 0 offset:[[VAL_SUB_OFFSET:[0-9]+]] ; 4-byte Folded Spill
 
 ; GCN: [[END]]:
 ; VGPR: v_readlane_b32 s[[S_RELOAD_SAVEEXEC_LO:[0-9]+]], [[SPILL_VGPR]], [[SAVEEXEC_LO_LANE]]

diff --git a/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll b/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
index 8dda45fee0bf..d81d05f50f44 100644
--- a/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
+++ b/llvm/test/CodeGen/AMDGPU/control-flow-optnone.ll
@@ -15,8 +15,8 @@
 ; GCN: s_mov_b64 exec
 
 ; GCN: s_or_b64 exec, exec
-; GCN: s_cmp_eq_u32
-; GCN: s_cbranch_scc1
+; GCN: {{[s|v]}}_cmp_eq_u32
+; GCN: s_cbranch
 ; GCN-NEXT: s_branch
 define amdgpu_kernel void @copytoreg_divergent_brcond(i32 %arg, i32 %arg1, i32 %arg2) #0 {
 bb:

diff --git a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
index 007ca13d53c9..4206c39caac7 100644
--- a/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/cross-block-use-is-not-abi-copy.ll
@@ -178,18 +178,18 @@ define amdgpu_kernel void @v3i16_registers(i1 %cond) #0 {
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], s4, 1
 ; GCN-NEXT:    s_and_b64 vcc, exec, s[4:5]
 ; GCN-NEXT:    s_mov_b32 s32, 0
-; GCN-NEXT:    s_cbranch_vccz BB4_2
-; GCN-NEXT:  ; %bb.1:
-; GCN-NEXT:    s_mov_b32 s4, 0
-; GCN-NEXT:    s_mov_b32 s5, s4
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:    s_branch BB4_3
-; GCN-NEXT:  BB4_2: ; %if.else
+; GCN-NEXT:    s_cbranch_vccnz BB4_2
+; GCN-NEXT:  ; %bb.1: ; %if.else
 ; GCN-NEXT:    s_getpc_b64 s[4:5]
 ; GCN-NEXT:    s_add_u32 s4, s4, func_v3i16@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s5, s5, func_v3i16@rel32@hi+4
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GCN-NEXT:    s_branch BB4_3
+; GCN-NEXT:  BB4_2:
+; GCN-NEXT:    s_mov_b32 s4, 0
+; GCN-NEXT:    s_mov_b32 s5, s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-NEXT:  BB4_3: ; %if.end
 ; GCN-NEXT:    global_store_short v[0:1], v1, off
 ; GCN-NEXT:    global_store_dword v[0:1], v0, off
@@ -223,18 +223,18 @@ define amdgpu_kernel void @v3f16_registers(i1 %cond) #0 {
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], s4, 1
 ; GCN-NEXT:    s_and_b64 vcc, exec, s[4:5]
 ; GCN-NEXT:    s_mov_b32 s32, 0
-; GCN-NEXT:    s_cbranch_vccz BB5_2
-; GCN-NEXT:  ; %bb.1:
-; GCN-NEXT:    s_mov_b32 s4, 0
-; GCN-NEXT:    s_mov_b32 s5, s4
-; GCN-NEXT:    v_mov_b32_e32 v0, s4
-; GCN-NEXT:    v_mov_b32_e32 v1, s5
-; GCN-NEXT:    s_branch BB5_3
-; GCN-NEXT:  BB5_2: ; %if.else
+; GCN-NEXT:    s_cbranch_vccnz BB5_2
+; GCN-NEXT:  ; %bb.1: ; %if.else
 ; GCN-NEXT:    s_getpc_b64 s[4:5]
 ; GCN-NEXT:    s_add_u32 s4, s4, func_v3f16@rel32@lo+4
 ; GCN-NEXT:    s_addc_u32 s5, s5, func_v3f16@rel32@hi+4
 ; GCN-NEXT:    s_swappc_b64 s[30:31], s[4:5]
+; GCN-NEXT:    s_branch BB5_3
+; GCN-NEXT:  BB5_2:
+; GCN-NEXT:    s_mov_b32 s4, 0
+; GCN-NEXT:    s_mov_b32 s5, s4
+; GCN-NEXT:    v_mov_b32_e32 v0, s4
+; GCN-NEXT:    v_mov_b32_e32 v1, s5
 ; GCN-NEXT:  BB5_3: ; %if.end
 ; GCN-NEXT:    global_store_short v[0:1], v1, off
 ; GCN-NEXT:    global_store_dword v[0:1], v0, off

diff --git a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
index 8e5aa8aa290b..66d5411cd978 100644
--- a/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
+++ b/llvm/test/CodeGen/AMDGPU/early-if-convert.ll
@@ -30,7 +30,6 @@ endif:
 ; GCN: v_cmp_neq_f32_e32 vcc, 1.0, [[VAL]]
 ; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[VAL]], [[VAL]]
 ; GCN-DAG: v_mul_f32_e32 [[MUL:v[0-9]+]], [[VAL]], [[VAL]]
-; GCN: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], [[ADD]], [[MUL]], vcc
 ; GCN: buffer_store_dword [[RESULT]]
 define amdgpu_kernel void @test_vccnz_ifcvt_diamond(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll b/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll
index c65683d4fab6..2cbe00ed80fd 100644
--- a/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll
+++ b/llvm/test/CodeGen/AMDGPU/i1-copy-phi-uniform-branch.ll
@@ -4,11 +4,11 @@
 
 ; GCN: ; %entry
 ; GCN:      s_cmp_eq_u32    s0, 0
-; GCN:      s_cbranch_scc1  [[PREEXIT:BB[0-9_]+]]
+; GCN:      s_cbranch_scc1  [[EXIT:BB[0-9_]+]]
 
 ; GCN: ; %blocka
 ; GCN:      s_cmp_eq_u32    s1, 0
-; GCN:      s_cbranch_scc1  [[EXIT:BB[0-9_]+]]
+; GCN:      s_cbranch_scc1  [[PREEXIT:BB[0-9_]+]]
 
 ; GCN: [[PREEXIT]]:
 ; GCN: [[EXIT]]:

diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index bde1cd5c4355..679a05c5d2ea 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -630,6 +630,7 @@ define amdgpu_kernel void @insertelement_v16f32_or_index(<16 x float> addrspace(
 ; GCN-LABEL: {{^}}broken_phi_bb:
 ; GCN: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8
 
+; GCN: {{BB[0-9]+_[0-9]+}}:
 ; GCN: [[BB2:BB[0-9]+_[0-9]+]]:
 ; GCN: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
 ; GCN: buffer_load_dword
@@ -645,7 +646,7 @@ define amdgpu_kernel void @insertelement_v16f32_or_index(<16 x float> addrspace(
 
 ; GCN: {{^; %bb.[0-9]}}:
 ; GCN: s_mov_b64 exec,
-; GCN: s_branch [[BB2]]
+; GCN: s_cbranch_vccnz [[BB2]]
 
 define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
 bb:

diff --git a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
index c18a076aad4e..db90e0c7449a 100644
--- a/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
+++ b/llvm/test/CodeGen/AMDGPU/infinite-loop.ll
@@ -77,28 +77,36 @@ define amdgpu_kernel void @infinite_loops(i32 addrspace(1)* %out) {
 ; SI-LABEL: infinite_loops:
 ; SI:       ; %bb.0: ; %entry
 ; SI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT:    s_mov_b64 s[2:3], -1
+; SI-NEXT:    s_cbranch_scc1 BB2_4
+; SI-NEXT:  ; %bb.1:
 ; SI-NEXT:    s_mov_b32 s3, 0xf000
 ; SI-NEXT:    s_mov_b32 s2, -1
-; SI-NEXT:    s_cbranch_scc0 BB2_3
-; SI-NEXT:  ; %bb.1: ; %loop1.preheader
-; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT:    v_mov_b32_e32 v0, 0x378
 ; SI-NEXT:    s_and_b64 vcc, exec, -1
-; SI-NEXT:  BB2_2: ; %loop1
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:  BB2_2:
+; SI:         s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; SI-NEXT:    s_cbranch_vccnz BB2_2
-; SI-NEXT:    s_branch BB2_5
-; SI-NEXT:  BB2_3:
-; SI-NEXT:    v_mov_b32_e32 v0, 0x378
-; SI-NEXT:    s_and_b64 vcc, exec, -1
-; SI-NEXT:  BB2_4: ; %loop2
-; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
+; SI-NEXT:  ; %bb.3:
+; SI-NEXT:    s_mov_b64 s[2:3], 0
+; SI-NEXT:  BB2_4:
+; SI-NEXT:    s_and_b64 vcc, exec, s[2:3]
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
-; SI-NEXT:    s_cbranch_vccnz BB2_4
-; SI-NEXT:  BB2_5: ; %DummyReturnBlock
+; SI-NEXT:    s_mov_b64 vcc, vcc
+; SI-NEXT:    s_cbranch_vccz BB2_7
+; SI-NEXT:  ; %bb.5:
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt expcnt(0)
+; SI-NEXT:    v_mov_b32_e32 v0, 0x3e7
+; SI-NEXT:    s_and_b64 vcc, exec, 0
+; SI-NEXT:  BB2_6:
+; SI:         buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_cbranch_vccz BB2_6
+; SI-NEXT:  BB2_7:
 ; SI-NEXT:    s_endpgm
+
 ; IR-LABEL: @infinite_loops(
 ; IR-NEXT:  entry:
 ; IR-NEXT:    br i1 undef, label [[LOOP1:%.*]], label [[LOOP2:%.*]]

diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 070a36dd4a21..942110ccc1a7 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1334,10 +1334,19 @@ define amdgpu_kernel void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 add
 ; SI-NEXT:    s_cbranch_scc0 BB26_2
 ; SI-NEXT:  ; %bb.1: ; %else
 ; SI-NEXT:    s_load_dword s1, s[6:7], 0x1
-; SI-NEXT:    s_branch BB26_3
-; SI-NEXT:  BB26_2: ; %if
+; SI-NEXT:    s_mov_b64 s[2:3], 0
+; SI-NEXT:    s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b64 vcc, vcc
+; SI-NEXT:    s_cbranch_vccz BB26_3
+; SI-NEXT:    s_branch BB26_4
+; SI-NEXT:  BB26_2:
+; SI-NEXT:    s_mov_b64 s[2:3], -1
+; SI-NEXT:    s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT:    s_cbranch_vccnz BB26_4
+; SI-NEXT:  BB26_3: ; %if
 ; SI-NEXT:    s_load_dword s1, s[6:7], 0x0
-; SI-NEXT:  BB26_3: ; %endif
+; SI-NEXT:  BB26_4: ; %endif
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v0, s0
 ; SI-NEXT:    s_mov_b32 s7, 0x100f000
@@ -1353,12 +1362,20 @@ define amdgpu_kernel void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 add
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_cmp_lg_u32 s0, 0
 ; VI-NEXT:    s_cbranch_scc0 BB26_2
-; VI-NEXT:  ; %bb.1: ; %else
+; VI-NEXT: ; %bb.1: ; %else
 ; VI-NEXT:    s_load_dword s1, s[6:7], 0x4
-; VI-NEXT:    s_branch BB26_3
-; VI-NEXT:  BB26_2: ; %if
+; VI-NEXT:    s_mov_b64 s[2:3], 0
+; VI-NEXT:    s_andn2_b64 vcc, exec, s[2:3]
+; VI-NEXT:    s_cbranch_vccz BB26_3
+; VI-NEXT:    s_branch BB26_4
+; VI-NEXT:  BB26_2:
+; VI-NEXT:    s_mov_b64 s[2:3], -1
+; VI-NEXT:    s_andn2_b64 vcc, exec, s[2:3]
+; VI-NEXT:    s_cbranch_vccnz BB26_4
+; VI-NEXT:  BB26_3: ; %if
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    s_load_dword s1, s[6:7], 0x0
-; VI-NEXT:  BB26_3: ; %endif
+; VI-NEXT:  BB26_4: ; %endif
 ; VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; VI-NEXT:    v_mov_b32_e32 v0, s0
 ; VI-NEXT:    s_mov_b32 s7, 0x1100f000

diff --git a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
index 94d273d6cc40..d17d37b5e4d0 100644
--- a/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
+++ b/llvm/test/CodeGen/AMDGPU/nested-loop-conditions.ll
@@ -31,10 +31,10 @@ define amdgpu_kernel void @reduced_nested_loop_conditions(i64 addrspace(3)* noca
 ; GCN-NEXT:  BB0_3: ; %bb8
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NEXT:    ds_read_b32 v0, v0
-; GCN-NEXT:    s_and_b64 vcc, exec, -1
+; GCN-NEXT:    s_and_b64 vcc, exec, 0
 ; GCN-NEXT:  BB0_4: ; %bb9
 ; GCN-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-NEXT:    s_cbranch_vccnz BB0_4
+; GCN-NEXT:    s_cbranch_vccz BB0_4
 ; GCN-NEXT:  BB0_5: ; %DummyReturnBlock
 ; GCN-NEXT:    s_endpgm
 ; IR-LABEL: @reduced_nested_loop_conditions(
@@ -144,33 +144,39 @@ define amdgpu_kernel void @nested_loop_conditions(i64 addrspace(1)* nocapture %a
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cmp_lt_i32_e32 vcc, 8, v0
 ; GCN-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-NEXT:    s_cbranch_vccnz BB1_5
+; GCN-NEXT:    s_cbranch_vccnz BB1_6
+
 ; GCN-NEXT:  ; %bb.1: ; %bb14.lr.ph
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
-; GCN-NEXT:  BB1_2: ; %bb14
+; GCN-NEXT:    s_branch BB1_3
+; GCN-NEXT:  BB1_2: ;   in Loop: Header=BB1_3 Depth=1
+; GCN-NEXT:    s_mov_b64 s[0:1], -1
+; GCN-NEXT:    ; implicit-def: $vgpr0
+; GCN-NEXT:    s_cbranch_execnz BB1_6
+; GCN-NEXT:  BB1_3: ; %bb14
 ; GCN-NEXT:    ; =>This Loop Header: Depth=1
-; GCN-NEXT:    ; Child Loop BB1_3 Depth 2
+; GCN-NEXT:    ;     Child Loop BB1_4 Depth 2
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 1, v0
 ; GCN-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-NEXT:    s_cbranch_vccnz BB1_5
-; GCN-NEXT:  BB1_3: ; %bb18
-; GCN-NEXT:    ; Parent Loop BB1_2 Depth=1
-; GCN-NEXT:    ; => This Inner Loop Header: Depth=2
+; GCN-NEXT:    s_cbranch_vccnz BB1_2
+; GCN-NEXT:  BB1_4: ; %bb18
+; GCN-NEXT:    ;   Parent Loop BB1_3 Depth=1
+; GCN-NEXT:    ; =>  This Inner Loop Header: Depth=2
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NEXT:    v_cmp_lt_i32_e32 vcc, 8, v0
 ; GCN-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-NEXT:    s_cbranch_vccnz BB1_3
-; GCN-NEXT:  ; %bb.4: ; %bb21
-; GCN-NEXT:    ; in Loop: Header=BB1_2 Depth=1
+; GCN-NEXT:    s_cbranch_vccnz BB1_4
+; GCN-NEXT:    ; %bb.5: ; %bb21
+; GCN-NEXT:    ;   in Loop: Header=BB1_3 Depth=1
 ; GCN-NEXT:    buffer_load_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    buffer_load_dword v1, off, s[0:3], 0
 ; GCN-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NEXT:    v_cmp_gt_i32_e32 vcc, 9, v1
-; GCN-NEXT:    s_and_b64 vcc, exec, vcc
-; GCN-NEXT:    s_cbranch_vccnz BB1_2
-; GCN-NEXT:  BB1_5: ; %bb31
+; GCN-NEXT:    v_cmp_lt_i32_e64 s[0:1], 8, v1
+; GCN-NEXT:    s_and_b64 vcc, exec, s[0:1]
+; GCN-NEXT:    s_cbranch_vccz BB1_3
+; GCN-NEXT:  BB1_6: ; %bb31
 ; GCN-NEXT:    v_mov_b32_e32 v0, 0
 ; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
 ; GCN-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
index 2be99267c4e0..fdfb9cd3ab19 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
@@ -35,12 +35,19 @@ bb4:
 }
 
 ; GCN-LABEL: {{^}}negated_cond_dominated_blocks:
-; GCN:   v_cmp_eq_u32_e64 [[CC:[^,]+]],
-; GCN: %bb4
+; GCN:   v_cmp_ne_u32_e64 [[CC1:[^,]+]],
+; GCN:   s_branch [[BB1:BB[0-9]+_[0-9]+]]
+; GCN: [[BB0:BB[0-9]+_[0-9]+]]
 ; GCN-NOT: v_cndmask_b32
 ; GCN-NOT: v_cmp
-; GCN:   s_andn2_b64 vcc, exec, [[CC]]
-; GCN:   s_cbranch_vccnz BB1_1
+; GCN: [[BB1]]:
+; GCN:   s_mov_b64 [[CC2:[^,]+]], -1
+; GCN:   s_mov_b64 vcc, [[CC1]]
+; GCN:   s_cbranch_vccz [[BB2:BB[0-9]+_[0-9]+]]
+; GCN:   s_mov_b64 [[CC2]], 0
+; GCN: [[BB2]]:
+; GCN:   s_andn2_b64 vcc, exec, [[CC2]]
+; GCN:   s_cbranch_vccnz [[BB0]]
 define amdgpu_kernel void @negated_cond_dominated_blocks(i32 addrspace(1)* %arg1) {
 bb:
   br label %bb2

diff --git a/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll b/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
index bfdd5c80b269..c8c60607c7fa 100644
--- a/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
+++ b/llvm/test/CodeGen/AMDGPU/salu-to-valu.ll
@@ -55,10 +55,10 @@ done:                                             ; preds = %loop
 
 ; GCN-LABEL: {{^}}smrd_valu:
 ; SI: s_movk_i32 [[OFFSET:s[0-9]+]], 0x2ee0
-; SI: s_mov_b32
 ; GCN: v_readfirstlane_b32 s[[PTR_LO:[0-9]+]], v{{[0-9]+}}
 ; GCN: v_readfirstlane_b32 s[[PTR_HI:[0-9]+]], v{{[0-9]+}}
-; SI: s_nop 3
+; SI: s_mov_b32
+; SI: s_nop 1
 ; SI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, [[OFFSET]]
 
 ; CI: s_load_dword [[OUT:s[0-9]+]], s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0xbb8
@@ -265,7 +265,7 @@ entry:
 
 ; GCN-LABEL: {{^}}smrd_valu2_max_smrd_offset:
 ; GCN-NOHSA: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1020{{$}}
-; GCN-HSA: flat_load_dword v{{[0-9]}}, v[{{[0-9]+:[0-9]+}}]
+; GCN-HSA flat_load_dword v{{[0-9]}}, v{{[0-9]+:[0-9]+}}
 define amdgpu_kernel void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(4)* %in) #1 {
 entry:
   %tmp = call i32 @llvm.amdgcn.workitem.id.x()

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index b066cebe5486..edbade80ad31 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -155,51 +155,42 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_subb_u32 s11, s7, s2
 ; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[8:9], s[0:1]
 ; GCN-IR-NEXT:    s_sub_u32 s6, s0, s8
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s6
 ; GCN-IR-NEXT:    s_subb_u32 s7, s1, s8
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[10:11], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s7
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[0:1]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s10
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s11
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s11
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[14:15], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[12:13], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[12:13], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[10:11], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[10:11], v4
 ; GCN-IR-NEXT:    s_add_u32 s10, s6, -1
@@ -209,7 +200,7 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -234,20 +225,30 @@ define amdgpu_kernel void @s_test_sdiv(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
-; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_6: ; %Flow6
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB0_7: ; %udiv-end
+; GCN-IR-NEXT:  BB0_7: ; %Flow7
 ; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[8:9], s[2:3]
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s1
 ; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s0, v0
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = sdiv i64 %x, %y
@@ -1006,72 +1007,61 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ;
 ; GCN-IR-LABEL: s_test_sdiv24_48:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xd
-; GCN-IR-NEXT:    s_load_dword s5, s[0:1], 0xe
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dword s0, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[2:3], 24
+; GCN-IR-NEXT:    s_sext_i32_i16 s7, s0
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
 ; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
-; GCN-IR-NEXT:    s_sext_i32_i16 s5, s5
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_ashr_i32 s6, s5, 31
-; GCN-IR-NEXT:    s_ashr_i64 s[12:13], s[4:5], 24
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[2:3], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s10, s4, s2
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 24
+; GCN-IR-NEXT:    s_ashr_i32 s6, s7, 31
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s10, s0, s2
 ; GCN-IR-NEXT:    s_mov_b32 s7, s6
-; GCN-IR-NEXT:    s_subb_u32 s11, s5, s2
-; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[6:7], s[12:13]
-; GCN-IR-NEXT:    s_sub_u32 s8, s4, s6
-; GCN-IR-NEXT:    s_subb_u32 s9, s5, s6
-; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s8
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s10
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT:    s_subb_u32 s11, s1, s2
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[8:9]
+; GCN-IR-NEXT:    s_sub_u32 s8, s0, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, s1, s6
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
+; GCN-IR-NEXT:    s_flbit_i32_b32 s14, s10
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s11
+; GCN-IR-NEXT:    s_add_i32 s14, s14, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s15, s11
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s15
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s14
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[14:15], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[12:13], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b64 vcc, vcc
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[12:13]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[12:13]
-; GCN-IR-NEXT:    s_branch BB9_7
-; GCN-IR-NEXT:  BB9_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[12:13], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[12:13], s[12:13], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[10:11], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB9_6
-; GCN-IR-NEXT:  BB9_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[10:11], v4
 ; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
@@ -1081,7 +1071,7 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB9_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -1106,12 +1096,22 @@ define amdgpu_kernel void @s_test_sdiv24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
-; GCN-IR-NEXT:  BB9_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_3
+; GCN-IR-NEXT:    s_branch BB9_6
+; GCN-IR-NEXT:  BB9_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB9_7
+; GCN-IR-NEXT:  BB9_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB9_6: ; %Flow3
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
-; GCN-IR-NEXT:  BB9_7: ; %udiv-end
+; GCN-IR-NEXT:  BB9_7: ; %Flow4
 ; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[6:7], s[2:3]
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s0, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s1, v1
@@ -1262,64 +1262,56 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
 ; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[2:3], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s8, s0, s2
-; GCN-IR-NEXT:    s_subb_u32 s9, s1, s2
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s8
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    s_sub_u32 s6, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s7, s1, s2
+; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s6
+; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s7
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s8
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[0:1], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[8:9], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[10:11], s[0:1]
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[10:11]
-; GCN-IR-NEXT:    s_branch BB10_7
-; GCN-IR-NEXT:  BB10_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB10_6
-; GCN-IR-NEXT:  BB10_4: ; %udiv-preheader
-; GCN-IR-NEXT:    s_add_u32 s7, s8, -1
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_add_u32 s8, s6, -1
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s10, s9, -1
+; GCN-IR-NEXT:    s_addc_u32 s9, s7, -1
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB10_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s10
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s7, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s9
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s8, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s8, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
 ; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s9, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v11, s7, v8
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
@@ -1331,8 +1323,16 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
-; GCN-IR-NEXT:  BB10_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_3
+; GCN-IR-NEXT:    s_branch BB10_6
+; GCN-IR-NEXT:  BB10_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB10_7
+; GCN-IR-NEXT:  BB10_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB10_6: ; %Flow5
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -1341,8 +1341,9 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
 ; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = sdiv i64 24, %x

diff --git a/llvm/test/CodeGen/AMDGPU/setcc.ll b/llvm/test/CodeGen/AMDGPU/setcc.ll
index a259784bc278..91fec72cab51 100644
--- a/llvm/test/CodeGen/AMDGPU/setcc.ll
+++ b/llvm/test/CodeGen/AMDGPU/setcc.ll
@@ -397,9 +397,9 @@ endif:
 }
 
 ; FUNC-LABEL: setcc-i1-and-xor
-; GCN-DAG: v_cmp_nge_f32_e64 [[A:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
-; GCN-DAG: v_cmp_nle_f32_e64 [[B:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
-; GCN: s_or_b64 s[2:3], [[A]], [[B]]
+; GCN-DAG: v_cmp_ge_f32_e64 [[A:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
+; GCN-DAG: v_cmp_le_f32_e64 [[B:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
+; GCN: s_and_b64 s[2:3], [[A]], [[B]]
 define amdgpu_kernel void @setcc-i1-and-xor(i32 addrspace(1)* %out, float %cond) #0 {
 bb0:
   %tmp5 = fcmp oge float %cond, 0.000000e+00

diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index e4d73e498cce..11dc48ca8aa9 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -19,10 +19,18 @@ define amdgpu_kernel void @sgpr_if_else_salu_br(i32 addrspace(1)* %out, i32 %a,
 ; SI-NEXT:    s_cbranch_scc0 BB0_2
 ; SI-NEXT:  ; %bb.1: ; %else
 ; SI-NEXT:    s_add_i32 s0, s11, s0
-; SI-NEXT:    s_branch BB0_3
-; SI-NEXT:  BB0_2: ; %if
+; SI-NEXT:    s_mov_b64 s[2:3], 0
+; SI-NEXT:    s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT:    s_cbranch_vccz BB0_3
+; SI-NEXT:    s_branch BB0_4
+; SI-NEXT:  BB0_2:
+; SI-NEXT:    s_mov_b64 s[2:3], -1
+; SI-NEXT:    ; implicit-def: $sgpr0
+; SI-NEXT:    s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT:    s_cbranch_vccnz BB0_4
+; SI-NEXT:  BB0_3: ; %if
 ; SI-NEXT:    s_sub_i32 s0, s9, s10
-; SI-NEXT:  BB0_3: ; %endif
+; SI-NEXT:  BB0_4: ; %endif
 ; SI-NEXT:    s_add_i32 s0, s0, s8
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
@@ -58,15 +66,25 @@ define amdgpu_kernel void @sgpr_if_else_salu_br_opt(i32 addrspace(1)* %out, [8 x
 ; SI-NEXT:    s_cbranch_scc0 BB1_2
 ; SI-NEXT:  ; %bb.1: ; %else
 ; SI-NEXT:    s_load_dword s3, s[0:1], 0x2e
-; SI-NEXT:    s_load_dword s0, s[0:1], 0x37
-; SI-NEXT:    s_branch BB1_3
-; SI-NEXT:  BB1_2: ; %if
+; SI-NEXT:    s_load_dword s6, s[0:1], 0x37
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_add_i32 s3, s3, s6
+; SI-NEXT:    s_mov_b64 s[6:7], 0
+; SI-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT:    s_cbranch_vccz BB1_3
+; SI-NEXT:    s_branch BB1_4
+; SI-NEXT:  BB1_2:
+; SI-NEXT:    s_mov_b64 s[6:7], -1
+; SI-NEXT:    ; implicit-def: $sgpr3
+; SI-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT:    s_cbranch_vccnz BB1_4
+; SI-NEXT:  BB1_3: ; %if
 ; SI-NEXT:    s_load_dword s3, s[0:1], 0x1c
 ; SI-NEXT:    s_load_dword s0, s[0:1], 0x25
-; SI-NEXT:  BB1_3: ; %endif
 ; SI-NEXT:    s_waitcnt lgkmcnt(0)
-; SI-NEXT:    s_add_i32 s0, s3, s0
-; SI-NEXT:    s_add_i32 s0, s0, s2
+; SI-NEXT:    s_add_i32 s3, s3, s0
+; SI-NEXT:  BB1_4: ; %endif
+; SI-NEXT:    s_add_i32 s0, s3, s2
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    v_mov_b32_e32 v0, s0

diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll b/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
index d1e5c389e69d..19a66082ad3d 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-copy.ll
@@ -211,14 +211,14 @@ ENDIF:                                            ; preds = %LOOP
 ; an assertion failure.
 
 ; CHECK-LABEL: {{^}}sample_v3:
-; CHECK: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 11
-; CHECK: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 13
+; CHECK: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 5
+; CHECK: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 7
 ; CHECK: s_branch
 
-; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 5
-; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 7
-
 ; CHECK: BB{{[0-9]+_[0-9]+}}:
+; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_LO:[0-9]+]], 11
+; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 13
+
 ; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[SAMPLE_LO]]:[[SAMPLE_HI]]{{\]}}
 ; CHECK: exp
 ; CHECK: s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
index faf6ca4cbcb2..fd3d3857404f 100644
--- a/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-annotate-cf.ll
@@ -200,10 +200,10 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
 ; SI-NEXT:    s_and_b64 vcc, exec, vcc
 ; SI-NEXT:    s_cbranch_vccz BB3_8
 ; SI-NEXT:  ; %bb.6: ; %for.body
-; SI-NEXT:    s_and_b64 vcc, exec, -1
+; SI-NEXT:    s_and_b64 vcc, exec, 0
 ; SI-NEXT:  BB3_7: ; %self.loop
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
-; SI-NEXT:    s_cbranch_vccnz BB3_7
+; SI-NEXT:    s_cbranch_vccz BB3_7
 ; SI-NEXT:  BB3_8: ; %DummyReturnBlock
 ; SI-NEXT:    s_endpgm
 ;
@@ -246,10 +246,10 @@ define amdgpu_kernel void @loop_land_info_assert(i32 %c0, i32 %c1, i32 %c2, i32
 ; FLAT-NEXT:    s_and_b64 vcc, exec, vcc
 ; FLAT-NEXT:    s_cbranch_vccz BB3_8
 ; FLAT-NEXT:  ; %bb.6: ; %for.body
-; FLAT-NEXT:    s_and_b64 vcc, exec, -1
+; FLAT-NEXT:    s_and_b64 vcc, exec, 0
 ; FLAT-NEXT:  BB3_7: ; %self.loop
 ; FLAT-NEXT:    ; =>This Inner Loop Header: Depth=1
-; FLAT-NEXT:    s_cbranch_vccnz BB3_7
+; FLAT-NEXT:    s_cbranch_vccz BB3_7
 ; FLAT-NEXT:  BB3_8: ; %DummyReturnBlock
 ; FLAT-NEXT:    s_endpgm
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 97a6c3757b0b..dea0242b05db 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -130,49 +130,40 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[0:1], s[8:9]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[8:9]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    s_add_u32 s8, s2, -1
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
@@ -182,7 +173,7 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -207,8 +198,18 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
-; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_6: ; %Flow6
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -217,16 +218,14 @@ define amdgpu_kernel void @s_test_srem(i64 addrspace(1)* %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
-; GCN-IR-NEXT:    s_mov_b32 s11, 0xf000
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT:    s_mov_b32 s10, -1
-; GCN-IR-NEXT:    s_mov_b32 s8, s4
-; GCN-IR-NEXT:    s_mov_b32 s9, s5
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
@@ -1027,82 +1026,73 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    s_ashr_i32 s2, s7, 31
 ; GCN-IR-NEXT:    s_ashr_i64 s[10:11], s[0:1], 31
 ; GCN-IR-NEXT:    s_ashr_i32 s0, s1, 31
-; GCN-IR-NEXT:    s_mov_b32 s1, s0
 ; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 31
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
+; GCN-IR-NEXT:    s_mov_b32 s1, s0
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[8:9], s[2:3]
-; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[10:11], s[0:1]
-; GCN-IR-NEXT:    s_sub_u32 s8, s6, s2
-; GCN-IR-NEXT:    s_subb_u32 s9, s7, s2
-; GCN-IR-NEXT:    s_sub_u32 s10, s10, s0
-; GCN-IR-NEXT:    s_subb_u32 s11, s11, s0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[10:11], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[8:9], 0
-; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s11, 0
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[0:1], s[6:7]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s10
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s11
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s8
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s9
-; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[10:11], s[0:1]
+; GCN-IR-NEXT:    s_sub_u32 s6, s6, s2
+; GCN-IR-NEXT:    s_subb_u32 s7, s7, s2
+; GCN-IR-NEXT:    s_sub_u32 s8, s8, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, s9, s0
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[6:7], s[6:7], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[6:7], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s9
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[6:7]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[6:7]
-; GCN-IR-NEXT:    s_branch BB8_7
-; GCN-IR-NEXT:  BB8_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[8:9], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB8_6
-; GCN-IR-NEXT:  BB8_4: ; %udiv-preheader
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
-; GCN-IR-NEXT:    s_add_u32 s6, s10, -1
-; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[8:9], v4
+; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v2, v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s7, s11, -1
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, -1
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB8_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s6, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s11
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s10, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
-; GCN-IR-NEXT:    v_and_b32_e32 v10, s10, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v10, s8, v8
 ; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v11, s11, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v11, s9, v8
 ; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
@@ -1114,28 +1104,38 @@ define amdgpu_kernel void @s_test_srem33_64(i64 addrspace(1)* %out, i64 %x, i64
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
-; GCN-IR-NEXT:  BB8_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_3
+; GCN-IR-NEXT:    s_branch BB8_6
+; GCN-IR-NEXT:  BB8_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB8_7
+; GCN-IR-NEXT:  BB8_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB8_6: ; %Flow6
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
 ; GCN-IR-NEXT:  BB8_7: ; %udiv-end
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, s10, v1
-; GCN-IR-NEXT:    v_mul_hi_u32 v2, s10, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, s11, v0
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, s10, v0
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, s8, v1
+; GCN-IR-NEXT:    v_mul_hi_u32 v2, s8, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, s9, v0
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, s8, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s9
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-IR-NEXT:    v_xor_b32_e32 v0, s2, v0
 ; GCN-IR-NEXT:    v_xor_b32_e32 v1, s3, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
 ; GCN-IR-NEXT:    v_subrev_i32_e32 v0, vcc, s2, v0
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %1 = ashr i64 %x, 31
@@ -1185,72 +1185,61 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ;
 ; GCN-IR-LABEL: s_test_srem24_48:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
-; GCN-IR-NEXT:    s_load_dword s4, s[0:1], 0xd
-; GCN-IR-NEXT:    s_load_dword s5, s[0:1], 0xe
+; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
+; GCN-IR-NEXT:    s_load_dword s0, s[0:1], 0xe
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    s_sext_i32_i16 s3, s3
-; GCN-IR-NEXT:    s_ashr_i64 s[6:7], s[2:3], 24
-; GCN-IR-NEXT:    s_sext_i32_i16 s5, s5
+; GCN-IR-NEXT:    s_sext_i32_i16 s7, s0
+; GCN-IR-NEXT:    s_ashr_i64 s[0:1], s[2:3], 24
 ; GCN-IR-NEXT:    s_ashr_i32 s2, s3, 31
-; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[4:5], 24
-; GCN-IR-NEXT:    s_ashr_i32 s4, s5, 31
+; GCN-IR-NEXT:    s_ashr_i32 s10, s7, 31
 ; GCN-IR-NEXT:    s_mov_b32 s3, s2
-; GCN-IR-NEXT:    s_mov_b32 s5, s4
-; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[6:7], s[2:3]
-; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[8:9], s[4:5]
-; GCN-IR-NEXT:    s_sub_u32 s6, s6, s2
-; GCN-IR-NEXT:    s_subb_u32 s7, s7, s2
-; GCN-IR-NEXT:    s_sub_u32 s8, s8, s4
-; GCN-IR-NEXT:    s_subb_u32 s9, s9, s4
-; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s8
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s9
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT:    s_ashr_i64 s[8:9], s[6:7], 24
+; GCN-IR-NEXT:    s_mov_b32 s11, s10
+; GCN-IR-NEXT:    s_xor_b64 s[0:1], s[0:1], s[2:3]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_sub_u32 s6, s0, s2
+; GCN-IR-NEXT:    s_subb_u32 s7, s1, s2
+; GCN-IR-NEXT:    s_sub_u32 s8, s8, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s8
+; GCN-IR-NEXT:    s_subb_u32 s9, s9, s10
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s9
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
+; GCN-IR-NEXT:    s_flbit_i32_b32 s12, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s9, 0
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT:    s_add_i32 s12, s12, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s13, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s13
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s12
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[6:7], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[12:13], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[12:13]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[10:11], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b64 vcc, vcc
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[10:11]
-; GCN-IR-NEXT:    s_branch BB9_7
-; GCN-IR-NEXT:  BB9_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[10:11]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[10:11], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[10:11], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB9_6
-; GCN-IR-NEXT:  BB9_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    s_add_u32 s10, s8, -1
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
@@ -1260,7 +1249,7 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB9_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB9_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -1285,8 +1274,18 @@ define amdgpu_kernel void @s_test_srem24_48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB9_5
-; GCN-IR-NEXT:  BB9_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB9_3
+; GCN-IR-NEXT:    s_branch BB9_6
+; GCN-IR-NEXT:  BB9_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB9_7
+; GCN-IR-NEXT:  BB9_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB9_6: ; %Flow3
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -1445,57 +1444,49 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[6:7], s[0:1]
 ; GCN-IR-NEXT:    s_sub_u32 s2, s2, s0
 ; GCN-IR-NEXT:    s_subb_u32 s3, s3, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s2
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
+; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s6
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[0:1], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_mov_b32 s6, -1
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[8:9]
-; GCN-IR-NEXT:    s_branch BB10_7
-; GCN-IR-NEXT:  BB10_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[6:7], s[6:7], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[6:7]
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB10_6
-; GCN-IR-NEXT:  BB10_4: ; %udiv-preheader
-; GCN-IR-NEXT:    s_add_u32 s7, s2, -1
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s8, s3, -1
+; GCN-IR-NEXT:    s_addc_u32 s7, s3, -1
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB10_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s7, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s6, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
 ; GCN-IR-NEXT:    v_and_b32_e32 v10, s2, v8
@@ -1512,8 +1503,16 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB10_5
-; GCN-IR-NEXT:  BB10_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB10_3
+; GCN-IR-NEXT:    s_branch BB10_6
+; GCN-IR-NEXT:  BB10_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB10_7
+; GCN-IR-NEXT:  BB10_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB10_6: ; %Flow5
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -1527,6 +1526,7 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = srem i64 24, %x

diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 7a4065eeac46..ef0c50c80ded 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -131,49 +131,40 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[0:1], s[8:9]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[8:9]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
 ; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
@@ -183,7 +174,7 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -208,8 +199,18 @@ define amdgpu_kernel void @s_test_udiv_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
-; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_6: ; %Flow6
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -825,66 +826,55 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ;
 ; GCN-IR-LABEL: s_test_udiv24_i48:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
+; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_load_dword s2, s[0:1], 0xb
 ; GCN-IR-NEXT:    s_load_dword s3, s[0:1], 0xc
 ; GCN-IR-NEXT:    s_load_dword s6, s[0:1], 0xd
-; GCN-IR-NEXT:    s_load_dword s5, s[0:1], 0xe
-; GCN-IR-NEXT:    s_mov_b32 s4, 0xffff
-; GCN-IR-NEXT:    s_mov_b32 s7, 0xff000000
+; GCN-IR-NEXT:    s_load_dword s7, s[0:1], 0xe
+; GCN-IR-NEXT:    s_mov_b32 s8, 0xffff
+; GCN-IR-NEXT:    s_mov_b32 s9, 0xff000000
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_and_b32 s3, s3, s4
-; GCN-IR-NEXT:    s_and_b32 s2, s2, s7
-; GCN-IR-NEXT:    s_and_b32 s5, s5, s4
-; GCN-IR-NEXT:    s_and_b32 s4, s6, s7
-; GCN-IR-NEXT:    s_lshr_b64 s[6:7], s[2:3], 24
-; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[4:5], 24
-; GCN-IR-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s2
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s3
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
+; GCN-IR-NEXT:    s_and_b32 s1, s3, s8
+; GCN-IR-NEXT:    s_and_b32 s0, s2, s9
+; GCN-IR-NEXT:    s_and_b32 s3, s7, s8
+; GCN-IR-NEXT:    s_and_b32 s2, s6, s9
+; GCN-IR-NEXT:    s_lshr_b64 s[2:3], s[2:3], 24
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
+; GCN-IR-NEXT:    s_lshr_b64 s[6:7], s[0:1], 24
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[6:7], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], s[10:11]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_mov_b64 vcc, vcc
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[8:9]
-; GCN-IR-NEXT:    s_branch BB7_7
-; GCN-IR-NEXT:  BB7_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB7_6
-; GCN-IR-NEXT:  BB7_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
 ; GCN-IR-NEXT:    s_add_u32 s6, s2, -1
@@ -894,7 +884,7 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB7_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -919,8 +909,18 @@ define amdgpu_kernel void @s_test_udiv24_i48(i48 addrspace(1)* %out, i48 %x, i48
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
-; GCN-IR-NEXT:  BB7_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_3
+; GCN-IR-NEXT:    s_branch BB7_6
+; GCN-IR-NEXT:  BB7_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB7_7
+; GCN-IR-NEXT:  BB7_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB7_6: ; %Flow3
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -1053,58 +1053,50 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-LABEL: s_test_udiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[0:1], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[2:3], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[8:9]
-; GCN-IR-NEXT:    s_branch BB8_7
-; GCN-IR-NEXT:  BB8_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB8_6
-; GCN-IR-NEXT:  BB8_4: ; %udiv-preheader
-; GCN-IR-NEXT:    s_add_u32 s3, s6, -1
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_add_u32 s2, s6, -1
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s8, s7, -1
+; GCN-IR-NEXT:    s_addc_u32 s3, s7, -1
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB8_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB8_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s3, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s2, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
 ; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
@@ -1121,14 +1113,22 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB8_5
-; GCN-IR-NEXT:  BB8_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB8_3
+; GCN-IR-NEXT:    s_branch BB8_6
+; GCN-IR-NEXT:  BB8_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB8_7
+; GCN-IR-NEXT:  BB8_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB8_6: ; %Flow5
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
 ; GCN-IR-NEXT:  BB8_7: ; %udiv-end
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s6, s2
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
 ; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = udiv i64 24, %x
@@ -1534,48 +1534,39 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 59, v2
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[6:7], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[2:3], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[2:3], s[2:3], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB11_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[2:3]
-; GCN-IR-NEXT:    s_branch BB11_7
-; GCN-IR-NEXT:  BB11_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_cbranch_vccz BB11_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB11_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB11_6
-; GCN-IR-NEXT:  BB11_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB11_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v3
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffc4, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], 0, -1, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB11_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -1598,8 +1589,18 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB11_5
-; GCN-IR-NEXT:  BB11_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB11_3
+; GCN-IR-NEXT:    s_branch BB11_6
+; GCN-IR-NEXT:  BB11_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB11_7
+; GCN-IR-NEXT:  BB11_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB11_6: ; %Flow5
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1

diff --git a/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll b/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
index 60ab7631a101..1bb427693171 100644
--- a/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
+++ b/llvm/test/CodeGen/AMDGPU/unhandled-loop-condition-assertion.ll
@@ -39,7 +39,6 @@ for.end:                                          ; preds = %for.body, %entry
 }
 
 ; COMMON-LABEL: {{^}}branch_false:
-; SI: s_cbranch_vccnz
 ; SI: s_cbranch_scc1
 ; SI: s_endpgm
 define amdgpu_kernel void @branch_false(i8 addrspace(1)* nocapture %main, i32 %main_stride) #0 {

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
index 2c64b1bdb3d2..b5e0ed3d61c0 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-cfg.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
+; RUN: llc -march=amdgcn -mcpu=verde -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -structurizecfg-skip-uniform-regions -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-early-ifcvt=0 -machine-sink-split-probability-threshold=0 -structurizecfg-skip-uniform-regions -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN -check-prefix=VI %s
 
 ; GCN-LABEL: {{^}}uniform_if_scc:
 ; GCN-DAG: s_cmp_eq_u32 s{{[0-9]+}}, 0

diff --git a/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll b/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
index a23eb2b137db..25b2a8dd6c7d 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
@@ -8,7 +8,7 @@
 ; CHECK-NEXT: s_cbranch_execz BB{{[0-9]+_[0-9]+}}
 
 ; CHECK: [[LOOP_BODY_LABEL:BB[0-9]+_[0-9]+]]: ; %loop_body
-; CHECK: s_cbranch_scc0 [[LOOP_BODY_LABEL]]
+; CHECK: s_cbranch_scc1 [[LOOP_BODY_LABEL]]
 
 ; CHECK: s_endpgm
 define amdgpu_ps void @test1(<8 x i32> inreg %rsrc, <2 x i32> %addr.base, i32 %y, i32 %p) {

diff  --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 9a6f7002ca87..a652fb17f959 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -130,49 +130,40 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0xd
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[0:1], s[8:9]
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
+; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s6
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s3, 0
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
+; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
+; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s7
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s11
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s10
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v2, v3
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[2:3], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[10:11], 0, 0, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[8:9]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[8:9]
-; GCN-IR-NEXT:    s_branch BB0_7
-; GCN-IR-NEXT:  BB0_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], s[8:9]
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[8:9], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[8:9], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[8:9]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[4:5], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB0_6
-; GCN-IR-NEXT:  BB0_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_not_b32_e32 v2, v2
 ; GCN-IR-NEXT:    s_add_u32 s8, s2, -1
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v4
@@ -182,7 +173,7 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], -1, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB0_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB0_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -207,8 +198,18 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB0_5
-; GCN-IR-NEXT:  BB0_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB0_3
+; GCN-IR-NEXT:    s_branch BB0_6
+; GCN-IR-NEXT:  BB0_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB0_7
+; GCN-IR-NEXT:  BB0_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB0_6: ; %Flow6
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -217,16 +218,14 @@ define amdgpu_kernel void @s_test_urem_i64(i64 addrspace(1)* %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s2, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v3, s3, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, s2, v0
-; GCN-IR-NEXT:    s_mov_b32 s11, 0xf000
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT:    s_mov_b32 s10, -1
-; GCN-IR-NEXT:    s_mov_b32 s8, s4
-; GCN-IR-NEXT:    s_mov_b32 s9, s5
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 %x, %y
   store i64 %result, i64 addrspace(1)* %out
@@ -861,58 +860,50 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-LABEL: s_test_urem_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc5, v2
-; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[0:1], 0, -1, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[6:7], 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[2:3], 0, -1, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[8:9], s[8:9], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[8:9], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB6_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[8:9]
-; GCN-IR-NEXT:    s_branch BB6_7
-; GCN-IR-NEXT:  BB6_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_cbranch_vccz BB6_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], 24, v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB6_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB6_6
-; GCN-IR-NEXT:  BB6_4: ; %udiv-preheader
-; GCN-IR-NEXT:    s_add_u32 s3, s6, -1
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB6_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
+; GCN-IR-NEXT:    s_add_u32 s2, s6, -1
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], 24, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 58, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    s_addc_u32 s8, s7, -1
+; GCN-IR-NEXT:    s_addc_u32 s3, s7, -1
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[0:1], 0, 0, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB6_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB6_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, s8
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s3, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, s2, v6
 ; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, v2, v7, vcc
 ; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
 ; GCN-IR-NEXT:    v_and_b32_e32 v10, s6, v8
@@ -929,8 +920,16 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[0:1], v7, v11, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB6_5
-; GCN-IR-NEXT:  BB6_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB6_3
+; GCN-IR-NEXT:    s_branch BB6_6
+; GCN-IR-NEXT:  BB6_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, 0
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, 24, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB6_7
+; GCN-IR-NEXT:  BB6_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB6_6: ; %Flow5
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -939,14 +938,13 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, s6, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v3, s7, v0
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, s6, v0
-; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
-; GCN-IR-NEXT:    s_mov_b32 s0, s4
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 24, %x
   store i64 %result, i64 addrspace(1)* %out
@@ -1064,48 +1062,39 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
 ; GCN-IR-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT:    s_flbit_i32_b32 s0, s6
-; GCN-IR-NEXT:    s_flbit_i32_b32 s1, s7
-; GCN-IR-NEXT:    s_add_i32 s0, s0, 32
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s1
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, s0
+; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s6
+; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s7
+; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s3
+; GCN-IR-NEXT:    v_mov_b32_e32 v1, s2
 ; GCN-IR-NEXT:    v_cmp_eq_u32_e64 vcc, s7, 0
 ; GCN-IR-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 59, v2
-; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[0:1], 0, 0, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[6:7], 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[2:3], 0, 0, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[6:7], 0
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[0:1], 63, v[0:1]
-; GCN-IR-NEXT:    s_or_b64 s[2:3], s[2:3], vcc
-; GCN-IR-NEXT:    s_or_b64 s[0:1], s[2:3], s[0:1]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_2
-; GCN-IR-NEXT:  ; %bb.1:
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[2:3]
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[2:3]
-; GCN-IR-NEXT:    s_branch BB7_7
-; GCN-IR-NEXT:  BB7_2: ; %udiv-bb1
+; GCN-IR-NEXT:    s_or_b64 s[0:1], s[0:1], vcc
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
+; GCN-IR-NEXT:    s_xor_b64 s[2:3], s[0:1], -1
+; GCN-IR-NEXT:    s_and_b64 s[2:3], s[2:3], vcc
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[2:3]
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
+; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
 ; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v4, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[0:1], v[3:4], v[0:1]
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 63, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], s[6:7], v0
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_4
-; GCN-IR-NEXT:  ; %bb.3:
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:    s_branch BB7_6
-; GCN-IR-NEXT:  BB7_4: ; %udiv-preheader
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[0:1]
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
+; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_lshr_b64 v[6:7], s[6:7], v3
 ; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffc4, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[0:1], 0, -1, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
-; GCN-IR-NEXT:  BB7_5: ; %udiv-do-while
+; GCN-IR-NEXT:  BB7_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
@@ -1128,8 +1117,18 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[0:1], 0, v7, s[0:1]
 ; GCN-IR-NEXT:    s_and_b64 vcc, exec, vcc
 ; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
-; GCN-IR-NEXT:    s_cbranch_vccz BB7_5
-; GCN-IR-NEXT:  BB7_6: ; %udiv-loop-exit
+; GCN-IR-NEXT:    s_cbranch_vccz BB7_3
+; GCN-IR-NEXT:    s_branch BB7_6
+; GCN-IR-NEXT:  BB7_4:
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s7
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v1, v0, 0, s[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v0, v0, 0, s[0:1]
+; GCN-IR-NEXT:    s_branch BB7_7
+; GCN-IR-NEXT:  BB7_5:
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
+; GCN-IR-NEXT:  BB7_6: ; %Flow5
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
 ; GCN-IR-NEXT:    v_or_b32_e32 v0, v2, v0
 ; GCN-IR-NEXT:    v_or_b32_e32 v1, v3, v1
@@ -1137,15 +1136,13 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-IR-NEXT:    v_mul_hi_u32 v2, v0, 24
 ; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, 24
 ; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, 24
-; GCN-IR-NEXT:    s_mov_b32 s3, 0xf000
-; GCN-IR-NEXT:    s_mov_b32 s2, -1
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_mov_b32_e32 v2, s7
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, s6, v0
-; GCN-IR-NEXT:    s_mov_b32 s0, s4
-; GCN-IR-NEXT:    s_mov_b32 s1, s5
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
-; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; GCN-IR-NEXT:    s_mov_b32 s7, 0xf000
+; GCN-IR-NEXT:    s_mov_b32 s6, -1
+; GCN-IR-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; GCN-IR-NEXT:    s_endpgm
   %result = urem i64 %x, 24
   store i64 %result, i64 addrspace(1)* %out

diff  --git a/llvm/test/CodeGen/AMDGPU/valu-i1.ll b/llvm/test/CodeGen/AMDGPU/valu-i1.ll
index 8d522ffd1158..01d51305e581 100644
--- a/llvm/test/CodeGen/AMDGPU/valu-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/valu-i1.ll
@@ -159,8 +159,8 @@ exit:
 ; SI: [[LABEL_LOOP:BB[0-9]+_[0-9]+]]:
 ; SI: buffer_load_dword
 ; SI-DAG: buffer_store_dword
-; SI-DAG: s_cmpk_eq_i32 s{{[0-9+]}}, 0x100
-; SI: s_cbranch_scc0 [[LABEL_LOOP]]
+; SI-DAG: s_cmpk_lg_i32 s{{[0-9+]}}, 0x100
+; SI: s_cbranch_scc1 [[LABEL_LOOP]]
 ; SI: [[LABEL_EXIT]]:
 ; SI: s_endpgm
 

diff  --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll
index e3183989e7d2..167d8fa21ccb 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.ll
+++ b/llvm/test/CodeGen/AMDGPU/wqm.ll
@@ -652,13 +652,13 @@ main_body:
 ; CHECK-DAG: v_mov_b32_e32 [[CTR:v[0-9]+]], 0
 ; CHECK-DAG: s_mov_b32 [[SEVEN:s[0-9]+]], 0x40e00000
 
+; CHECK: ; %body
+; CHECK: v_add_f32_e32 [[CTR]], 2.0, [[CTR]]
 ; CHECK: [[LOOPHDR:BB[0-9]+_[0-9]+]]: ; %loop
 ; CHECK: v_cmp_lt_f32_e32 vcc, [[SEVEN]], [[CTR]]
-; CHECK: s_cbranch_vccnz
+; CHECK: s_cbranch_vccz
 
-; CHECK: ; %body
-; CHECK: v_add_f32_e32 [[CTR]], 2.0, [[CTR]]
-; CHECK: s_branch [[LOOPHDR]]
+; CHECK: s_cbranch_vccnz [[LOOPHDR]]
 
 ; CHECK: ; %break
 ; CHECK: ; return
@@ -769,13 +769,12 @@ else:
 ; CHECK: s_wqm_b64 exec, exec
 ; CHECK: s_cmp_
 ; CHECK-NEXT: s_cbranch_scc
-; CHECK: ; %if
-; CHECK: s_and_b64 exec, exec, [[ORIG]]
-; CHECK: image_sample
 ; CHECK: ; %else
-; CHECK: s_and_b64 exec, exec, [[ORIG]]
+; CHECK: image_sample
+; CHECK: ; %if
 ; CHECK: image_sample
 ; CHECK: ; %end
+; CHECK: s_and_b64 exec, exec, [[ORIG]]
 define amdgpu_ps <4 x float> @test_scc(i32 inreg %sel, i32 %idx) #1 {
 main_body:
   %cc = icmp sgt i32 %sel, 0
