[llvm] dd05129 - [AMDGPU] Enable GCNRewritePartialRegUses pass by default. (#72975)

via llvm-commits <llvm-commits at lists.llvm.org>
Thu Dec 14 05:10:32 PST 2023


Author: Valery Pykhtin
Date: 2023-12-14T14:10:27+01:00
New Revision: dd051295bc0a9c6a12729f81c59685f50574e1fb

URL: https://github.com/llvm/llvm-project/commit/dd051295bc0a9c6a12729f81c59685f50574e1fb
DIFF: https://github.com/llvm/llvm-project/commit/dd051295bc0a9c6a12729f81c59685f50574e1fb.diff

LOG: [AMDGPU] Enable GCNRewritePartialRegUses pass by default. (#72975)

Let's try once again after #69957 has landed.
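
Note for downstream users: this change only flips the default of the
existing hidden cl::opt, so the pass can still be disabled when bisecting
regressions, e.g. with -amdgpu-enable-rewrite-partial-reg-uses=0 on the
llc command line (or forwarded via -mllvm when driving clang).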

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
    llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
    llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
    llvm/test/CodeGen/AMDGPU/dead-lane.mir
    llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
    llvm/test/CodeGen/AMDGPU/idiv-licm.ll
    llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
    llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
    llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
    llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
    llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
    llvm/test/CodeGen/AMDGPU/load-global-i16.ll
    llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
    llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
    llvm/test/CodeGen/AMDGPU/mad_64_32.ll
    llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
    llvm/test/CodeGen/AMDGPU/mul.ll
    llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
    llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/AMDGPU/udiv64.ll
    llvm/test/CodeGen/AMDGPU/urem64.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
index 0cc048ef0e859a..0e0094cb9cd6ef 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetMachine.cpp
@@ -352,7 +352,7 @@ static cl::opt<bool> EnableMaxIlpSchedStrategy(
 
 static cl::opt<bool> EnableRewritePartialRegUses(
     "amdgpu-enable-rewrite-partial-reg-uses",
-    cl::desc("Enable rewrite partial reg uses pass"), cl::init(false),
+    cl::desc("Enable rewrite partial reg uses pass"), cl::init(true),
     cl::Hidden);
 
 static cl::opt<bool> EnableHipStdPar(
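
For context, a minimal C++ sketch of how the hidden flag gates the pass
elsewhere in the GCN pipeline. The hook and anchor pass shown are
assumptions inferred from the pass ordering visible in llc-pipeline.ll,
not text quoted from this patch:

    // Sketch only; the exact insertion point is an assumption.
    void GCNPassConfig::addOptimizedRegAlloc() {
      // With cl::init(true), GCNRewritePartialRegUses now runs by
      // default unless the user passes
      // -amdgpu-enable-rewrite-partial-reg-uses=0.
      if (EnableRewritePartialRegUses)
        insertPass(&RenameIndependentSubregsID,
                   &GCNRewritePartialRegUsesID);
      // ... remainder of the register-allocation pipeline setup ...
    }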

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
index d4c536bdd5ebe1..ac153183be642a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/extractelement.ll
@@ -292,46 +292,46 @@ define i64 @dyn_extract_v8i64_const_s_v(i32 %sel) {
 ; GCN-LABEL: dyn_extract_v8i64_const_s_v:
 ; GCN:       ; %bb.0: ; %entry
 ; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT:    s_mov_b64 s[4:5], 1
-; GCN-NEXT:    s_mov_b64 s[6:7], 2
-; GCN-NEXT:    v_mov_b32_e32 v1, s4
-; GCN-NEXT:    v_mov_b32_e32 v2, s5
-; GCN-NEXT:    v_mov_b32_e32 v3, s6
-; GCN-NEXT:    v_mov_b32_e32 v4, s7
-; GCN-NEXT:    s_mov_b64 s[8:9], 3
+; GCN-NEXT:    s_mov_b64 s[16:17], 2
+; GCN-NEXT:    s_mov_b64 s[18:19], 1
+; GCN-NEXT:    s_mov_b64 s[14:15], 3
+; GCN-NEXT:    v_mov_b32_e32 v1, s18
+; GCN-NEXT:    v_mov_b32_e32 v2, s19
+; GCN-NEXT:    v_mov_b32_e32 v3, s16
+; GCN-NEXT:    v_mov_b32_e32 v4, s17
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    v_mov_b32_e32 v5, s8
-; GCN-NEXT:    v_mov_b32_e32 v6, s9
-; GCN-NEXT:    s_mov_b64 s[10:11], 4
+; GCN-NEXT:    s_mov_b64 s[12:13], 4
+; GCN-NEXT:    v_mov_b32_e32 v5, s14
+; GCN-NEXT:    v_mov_b32_e32 v6, s15
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GCN-NEXT:    s_mov_b64 s[12:13], 5
-; GCN-NEXT:    v_mov_b32_e32 v7, s10
-; GCN-NEXT:    v_mov_b32_e32 v8, s11
+; GCN-NEXT:    s_mov_b64 s[10:11], 5
+; GCN-NEXT:    v_mov_b32_e32 v7, s12
+; GCN-NEXT:    v_mov_b32_e32 v8, s13
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GCN-NEXT:    s_mov_b64 s[14:15], 6
-; GCN-NEXT:    v_mov_b32_e32 v9, s12
-; GCN-NEXT:    v_mov_b32_e32 v10, s13
+; GCN-NEXT:    s_mov_b64 s[8:9], 6
+; GCN-NEXT:    v_mov_b32_e32 v9, s10
+; GCN-NEXT:    v_mov_b32_e32 v10, s11
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GCN-NEXT:    s_mov_b64 s[16:17], 7
-; GCN-NEXT:    v_mov_b32_e32 v11, s14
-; GCN-NEXT:    v_mov_b32_e32 v12, s15
+; GCN-NEXT:    s_mov_b64 s[6:7], 7
+; GCN-NEXT:    v_mov_b32_e32 v11, s8
+; GCN-NEXT:    v_mov_b32_e32 v12, s9
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
-; GCN-NEXT:    s_mov_b64 s[18:19], 8
-; GCN-NEXT:    v_mov_b32_e32 v13, s16
-; GCN-NEXT:    v_mov_b32_e32 v14, s17
+; GCN-NEXT:    s_mov_b64 s[4:5], 8
+; GCN-NEXT:    v_mov_b32_e32 v13, s6
+; GCN-NEXT:    v_mov_b32_e32 v14, s7
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v12, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v0
-; GCN-NEXT:    v_mov_b32_e32 v15, s18
-; GCN-NEXT:    v_mov_b32_e32 v16, s19
+; GCN-NEXT:    v_mov_b32_e32 v15, s4
+; GCN-NEXT:    v_mov_b32_e32 v16, s5
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
@@ -342,72 +342,72 @@ define i64 @dyn_extract_v8i64_const_s_v(i32 %sel) {
 ; GFX10-LABEL: dyn_extract_v8i64_const_s_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT:    s_mov_b64 s[6:7], 2
+; GFX10-NEXT:    s_mov_b64 s[4:5], 2
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    v_mov_b32_e32 v1, s6
-; GFX10-NEXT:    v_mov_b32_e32 v2, s7
-; GFX10-NEXT:    s_mov_b64 s[4:5], 1
-; GFX10-NEXT:    s_mov_b64 s[8:9], 3
-; GFX10-NEXT:    s_mov_b64 s[10:11], 4
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s4, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s5, v2, vcc_lo
+; GFX10-NEXT:    v_mov_b32_e32 v1, s4
+; GFX10-NEXT:    v_mov_b32_e32 v2, s5
+; GFX10-NEXT:    s_mov_b64 s[6:7], 1
+; GFX10-NEXT:    s_mov_b64 s[4:5], 3
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s6, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s7, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    s_mov_b64 s[12:13], 5
-; GFX10-NEXT:    s_mov_b64 s[14:15], 6
-; GFX10-NEXT:    s_mov_b64 s[16:17], 7
-; GFX10-NEXT:    s_mov_b64 s[18:19], 8
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
+; GFX10-NEXT:    s_mov_b64 s[6:7], 4
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
+; GFX10-NEXT:    s_mov_b64 s[4:5], 5
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX10-NEXT:    s_mov_b64 s[6:7], 6
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
+; GFX10-NEXT:    s_mov_b64 s[4:5], 7
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s17, vcc_lo
+; GFX10-NEXT:    s_mov_b64 s[6:7], 8
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s19, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s6, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s7, vcc_lo
 ; GFX10-NEXT:    s_setpc_b64 s[30:31]
 ;
 ; GFX11-LABEL: dyn_extract_v8i64_const_s_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT:    s_mov_b64 s[2:3], 2
+; GFX11-NEXT:    s_mov_b64 s[0:1], 2
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s3
-; GFX11-NEXT:    s_mov_b64 s[0:1], 1
-; GFX11-NEXT:    s_mov_b64 s[4:5], 3
-; GFX11-NEXT:    s_mov_b64 s[6:7], 4
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
+; GFX11-NEXT:    v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
+; GFX11-NEXT:    s_mov_b64 s[2:3], 1
+; GFX11-NEXT:    s_mov_b64 s[0:1], 3
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX11-NEXT:    s_mov_b64 s[8:9], 5
-; GFX11-NEXT:    s_mov_b64 s[10:11], 6
-; GFX11-NEXT:    s_mov_b64 s[12:13], 7
-; GFX11-NEXT:    s_mov_b64 s[14:15], 8
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
+; GFX11-NEXT:    s_mov_b64 s[2:3], 4
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
+; GFX11-NEXT:    s_mov_b64 s[0:1], 5
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
+; GFX11-NEXT:    s_mov_b64 s[2:3], 6
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
+; GFX11-NEXT:    s_mov_b64 s[0:1], 7
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX11-NEXT:    s_mov_b64 s[2:3], 8
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s1, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s3, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
 entry:
   %ext = extractelement <8 x i64> <i64 1, i64 2, i64 3, i64 4, i64 5, i64 6, i64 7, i64 8>, i32 %sel
@@ -492,40 +492,28 @@ entry:
 define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) {
 ; GPRIDX-LABEL: dyn_extract_v8i64_s_v:
 ; GPRIDX:       ; %bb.0: ; %entry
-; GPRIDX-NEXT:    s_mov_b32 s0, s2
-; GPRIDX-NEXT:    s_mov_b32 s1, s3
-; GPRIDX-NEXT:    s_mov_b32 s2, s4
-; GPRIDX-NEXT:    s_mov_b32 s3, s5
-; GPRIDX-NEXT:    s_mov_b32 s4, s6
-; GPRIDX-NEXT:    s_mov_b32 s5, s7
-; GPRIDX-NEXT:    v_mov_b32_e32 v1, s0
-; GPRIDX-NEXT:    v_mov_b32_e32 v2, s1
-; GPRIDX-NEXT:    v_mov_b32_e32 v3, s2
-; GPRIDX-NEXT:    v_mov_b32_e32 v4, s3
+; GPRIDX-NEXT:    v_mov_b32_e32 v1, s2
+; GPRIDX-NEXT:    v_mov_b32_e32 v2, s3
+; GPRIDX-NEXT:    v_mov_b32_e32 v3, s4
+; GPRIDX-NEXT:    v_mov_b32_e32 v4, s5
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GPRIDX-NEXT:    s_mov_b32 s6, s8
-; GPRIDX-NEXT:    s_mov_b32 s7, s9
-; GPRIDX-NEXT:    v_mov_b32_e32 v5, s4
-; GPRIDX-NEXT:    v_mov_b32_e32 v6, s5
+; GPRIDX-NEXT:    v_mov_b32_e32 v5, s6
+; GPRIDX-NEXT:    v_mov_b32_e32 v6, s7
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GPRIDX-NEXT:    s_mov_b32 s8, s10
-; GPRIDX-NEXT:    s_mov_b32 s9, s11
-; GPRIDX-NEXT:    v_mov_b32_e32 v7, s6
-; GPRIDX-NEXT:    v_mov_b32_e32 v8, s7
+; GPRIDX-NEXT:    v_mov_b32_e32 v7, s8
+; GPRIDX-NEXT:    v_mov_b32_e32 v8, s9
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GPRIDX-NEXT:    s_mov_b32 s10, s12
-; GPRIDX-NEXT:    s_mov_b32 s11, s13
-; GPRIDX-NEXT:    v_mov_b32_e32 v9, s8
-; GPRIDX-NEXT:    v_mov_b32_e32 v10, s9
+; GPRIDX-NEXT:    v_mov_b32_e32 v9, s10
+; GPRIDX-NEXT:    v_mov_b32_e32 v10, s11
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; GPRIDX-NEXT:    v_mov_b32_e32 v11, s10
-; GPRIDX-NEXT:    v_mov_b32_e32 v12, s11
+; GPRIDX-NEXT:    v_mov_b32_e32 v11, s12
+; GPRIDX-NEXT:    v_mov_b32_e32 v12, s13
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
@@ -546,40 +534,28 @@ define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) {
 ;
 ; MOVREL-LABEL: dyn_extract_v8i64_s_v:
 ; MOVREL:       ; %bb.0: ; %entry
-; MOVREL-NEXT:    s_mov_b32 s0, s2
-; MOVREL-NEXT:    s_mov_b32 s1, s3
-; MOVREL-NEXT:    s_mov_b32 s2, s4
-; MOVREL-NEXT:    s_mov_b32 s3, s5
-; MOVREL-NEXT:    s_mov_b32 s4, s6
-; MOVREL-NEXT:    s_mov_b32 s5, s7
-; MOVREL-NEXT:    v_mov_b32_e32 v1, s0
-; MOVREL-NEXT:    v_mov_b32_e32 v2, s1
-; MOVREL-NEXT:    v_mov_b32_e32 v3, s2
-; MOVREL-NEXT:    v_mov_b32_e32 v4, s3
+; MOVREL-NEXT:    v_mov_b32_e32 v1, s2
+; MOVREL-NEXT:    v_mov_b32_e32 v2, s3
+; MOVREL-NEXT:    v_mov_b32_e32 v3, s4
+; MOVREL-NEXT:    v_mov_b32_e32 v4, s5
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; MOVREL-NEXT:    s_mov_b32 s6, s8
-; MOVREL-NEXT:    s_mov_b32 s7, s9
-; MOVREL-NEXT:    v_mov_b32_e32 v5, s4
-; MOVREL-NEXT:    v_mov_b32_e32 v6, s5
+; MOVREL-NEXT:    v_mov_b32_e32 v5, s6
+; MOVREL-NEXT:    v_mov_b32_e32 v6, s7
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; MOVREL-NEXT:    s_mov_b32 s8, s10
-; MOVREL-NEXT:    s_mov_b32 s9, s11
-; MOVREL-NEXT:    v_mov_b32_e32 v7, s6
-; MOVREL-NEXT:    v_mov_b32_e32 v8, s7
+; MOVREL-NEXT:    v_mov_b32_e32 v7, s8
+; MOVREL-NEXT:    v_mov_b32_e32 v8, s9
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; MOVREL-NEXT:    s_mov_b32 s10, s12
-; MOVREL-NEXT:    s_mov_b32 s11, s13
-; MOVREL-NEXT:    v_mov_b32_e32 v9, s8
-; MOVREL-NEXT:    v_mov_b32_e32 v10, s9
+; MOVREL-NEXT:    v_mov_b32_e32 v9, s10
+; MOVREL-NEXT:    v_mov_b32_e32 v10, s11
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
-; MOVREL-NEXT:    v_mov_b32_e32 v11, s10
-; MOVREL-NEXT:    v_mov_b32_e32 v12, s11
+; MOVREL-NEXT:    v_mov_b32_e32 v11, s12
+; MOVREL-NEXT:    v_mov_b32_e32 v12, s13
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v1, v1, v9, vcc
 ; MOVREL-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
 ; MOVREL-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v0
@@ -600,88 +576,56 @@ define amdgpu_ps void @dyn_extract_v8i64_s_v(<8 x i64> inreg %vec, i32 %sel) {
 ;
 ; GFX10-LABEL: dyn_extract_v8i64_s_v:
 ; GFX10:       ; %bb.0: ; %entry
-; GFX10-NEXT:    s_mov_b32 s0, s2
-; GFX10-NEXT:    s_mov_b32 s2, s4
-; GFX10-NEXT:    s_mov_b32 s19, s5
-; GFX10-NEXT:    v_mov_b32_e32 v1, s2
-; GFX10-NEXT:    v_mov_b32_e32 v2, s19
+; GFX10-NEXT:    v_mov_b32_e32 v1, s4
+; GFX10-NEXT:    v_mov_b32_e32 v2, s5
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    s_mov_b32 s1, s3
-; GFX10-NEXT:    s_mov_b32 s4, s6
-; GFX10-NEXT:    s_mov_b32 s5, s7
-; GFX10-NEXT:    s_mov_b32 s6, s8
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    s_mov_b32 s7, s9
-; GFX10-NEXT:    s_mov_b32 s8, s10
-; GFX10-NEXT:    s_mov_b32 s9, s11
-; GFX10-NEXT:    s_mov_b32 s10, s12
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    s_mov_b32 s11, s13
-; GFX10-NEXT:    s_mov_b32 s12, s14
-; GFX10-NEXT:    s_mov_b32 s13, s15
-; GFX10-NEXT:    s_mov_b32 s14, s16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX10-NEXT:    s_mov_b32 s15, s17
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s17, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[0:1], off
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: dyn_extract_v8i64_s_v:
 ; GFX11:       ; %bb.0: ; %entry
-; GFX11-NEXT:    s_mov_b32 s0, s2
-; GFX11-NEXT:    s_mov_b32 s2, s4
-; GFX11-NEXT:    s_mov_b32 s19, s5
-; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s19
+; GFX11-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    s_mov_b32 s1, s3
-; GFX11-NEXT:    s_mov_b32 s4, s6
-; GFX11-NEXT:    s_mov_b32 s5, s7
-; GFX11-NEXT:    s_mov_b32 s6, s8
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX11-NEXT:    s_mov_b32 s7, s9
-; GFX11-NEXT:    s_mov_b32 s8, s10
-; GFX11-NEXT:    s_mov_b32 s9, s11
-; GFX11-NEXT:    s_mov_b32 s10, s12
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX11-NEXT:    s_mov_b32 s11, s13
-; GFX11-NEXT:    s_mov_b32 s12, s14
-; GFX11-NEXT:    s_mov_b32 s13, s15
-; GFX11-NEXT:    s_mov_b32 s14, s16
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
-; GFX11-NEXT:    s_mov_b32 s15, s17
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s14, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s16, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s17, vcc_lo
 ; GFX11-NEXT:    global_store_b64 v[0:1], v[0:1], off
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
@@ -2467,26 +2411,18 @@ entry:
 define amdgpu_ps double @dyn_extract_v6f64_s_v(<6 x double> inreg %vec, i32 %sel) {
 ; GCN-LABEL: dyn_extract_v6f64_s_v:
 ; GCN:       ; %bb.0: ; %entry
-; GCN-NEXT:    s_mov_b32 s0, s2
-; GCN-NEXT:    s_mov_b32 s1, s3
-; GCN-NEXT:    s_mov_b32 s2, s4
-; GCN-NEXT:    s_mov_b32 s3, s5
-; GCN-NEXT:    s_mov_b32 s4, s6
-; GCN-NEXT:    s_mov_b32 s5, s7
-; GCN-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-NEXT:    v_mov_b32_e32 v2, s1
-; GCN-NEXT:    v_mov_b32_e32 v3, s2
-; GCN-NEXT:    v_mov_b32_e32 v4, s3
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-NEXT:    v_mov_b32_e32 v4, s5
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    s_mov_b32 s6, s8
-; GCN-NEXT:    s_mov_b32 s7, s9
-; GCN-NEXT:    v_mov_b32_e32 v5, s4
-; GCN-NEXT:    v_mov_b32_e32 v6, s5
+; GCN-NEXT:    v_mov_b32_e32 v5, s6
+; GCN-NEXT:    v_mov_b32_e32 v6, s7
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GCN-NEXT:    v_mov_b32_e32 v7, s6
-; GCN-NEXT:    v_mov_b32_e32 v8, s7
+; GCN-NEXT:    v_mov_b32_e32 v7, s8
+; GCN-NEXT:    v_mov_b32_e32 v8, s9
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
@@ -2508,69 +2444,45 @@ define amdgpu_ps double @dyn_extract_v6f64_s_v(<6 x double> inreg %vec, i32 %sel
 ;
 ; GFX10-LABEL: dyn_extract_v6f64_s_v:
 ; GFX10:       ; %bb.0: ; %entry
-; GFX10-NEXT:    s_mov_b32 s0, s2
-; GFX10-NEXT:    s_mov_b32 s2, s4
-; GFX10-NEXT:    s_mov_b32 s15, s5
-; GFX10-NEXT:    v_mov_b32_e32 v1, s2
-; GFX10-NEXT:    v_mov_b32_e32 v2, s15
+; GFX10-NEXT:    v_mov_b32_e32 v1, s4
+; GFX10-NEXT:    v_mov_b32_e32 v2, s5
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    s_mov_b32 s1, s3
-; GFX10-NEXT:    s_mov_b32 s4, s6
-; GFX10-NEXT:    s_mov_b32 s5, s7
-; GFX10-NEXT:    s_mov_b32 s6, s8
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    s_mov_b32 s7, s9
-; GFX10-NEXT:    s_mov_b32 s8, s10
-; GFX10-NEXT:    s_mov_b32 s9, s11
-; GFX10-NEXT:    s_mov_b32 s10, s12
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    s_mov_b32 s11, s13
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s11, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s12, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s13, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_extract_v6f64_s_v:
 ; GFX11:       ; %bb.0: ; %entry
-; GFX11-NEXT:    s_mov_b32 s0, s2
-; GFX11-NEXT:    s_mov_b32 s2, s4
-; GFX11-NEXT:    s_mov_b32 s15, s5
-; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s15
+; GFX11-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    s_mov_b32 s1, s3
-; GFX11-NEXT:    s_mov_b32 s4, s6
-; GFX11-NEXT:    s_mov_b32 s5, s7
-; GFX11-NEXT:    s_mov_b32 s6, s8
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX11-NEXT:    s_mov_b32 s7, s9
-; GFX11-NEXT:    s_mov_b32 s8, s10
-; GFX11-NEXT:    s_mov_b32 s9, s11
-; GFX11-NEXT:    s_mov_b32 s10, s12
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX11-NEXT:    s_mov_b32 s11, s13
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s10, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s11, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s12, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s13, vcc_lo
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    ; return to shader part epilog
@@ -2717,33 +2629,23 @@ entry:
 define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel) {
 ; GCN-LABEL: dyn_extract_v7f64_s_v:
 ; GCN:       ; %bb.0: ; %entry
-; GCN-NEXT:    s_mov_b32 s0, s2
-; GCN-NEXT:    s_mov_b32 s1, s3
-; GCN-NEXT:    s_mov_b32 s2, s4
-; GCN-NEXT:    s_mov_b32 s3, s5
-; GCN-NEXT:    s_mov_b32 s4, s6
-; GCN-NEXT:    s_mov_b32 s5, s7
-; GCN-NEXT:    v_mov_b32_e32 v1, s0
-; GCN-NEXT:    v_mov_b32_e32 v2, s1
-; GCN-NEXT:    v_mov_b32_e32 v3, s2
-; GCN-NEXT:    v_mov_b32_e32 v4, s3
+; GCN-NEXT:    v_mov_b32_e32 v1, s2
+; GCN-NEXT:    v_mov_b32_e32 v2, s3
+; GCN-NEXT:    v_mov_b32_e32 v3, s4
+; GCN-NEXT:    v_mov_b32_e32 v4, s5
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
-; GCN-NEXT:    s_mov_b32 s6, s8
-; GCN-NEXT:    s_mov_b32 s7, s9
-; GCN-NEXT:    v_mov_b32_e32 v5, s4
-; GCN-NEXT:    v_mov_b32_e32 v6, s5
+; GCN-NEXT:    v_mov_b32_e32 v5, s6
+; GCN-NEXT:    v_mov_b32_e32 v6, s7
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v3, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v4, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v0
-; GCN-NEXT:    s_mov_b32 s8, s10
-; GCN-NEXT:    s_mov_b32 s9, s11
-; GCN-NEXT:    v_mov_b32_e32 v7, s6
-; GCN-NEXT:    v_mov_b32_e32 v8, s7
+; GCN-NEXT:    v_mov_b32_e32 v7, s8
+; GCN-NEXT:    v_mov_b32_e32 v8, s9
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v5, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v0
-; GCN-NEXT:    v_mov_b32_e32 v9, s8
-; GCN-NEXT:    v_mov_b32_e32 v10, s9
+; GCN-NEXT:    v_mov_b32_e32 v9, s10
+; GCN-NEXT:    v_mov_b32_e32 v10, s11
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v7, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v0
@@ -2760,8 +2662,8 @@ define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v13, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc
 ; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v0
-; GCN-NEXT:    ; kill: def $vgpr15 killed $sgpr14 killed $exec
-; GCN-NEXT:    ; kill: def $vgpr16 killed $sgpr15 killed $exec
+; GCN-NEXT:    ; kill: def $vgpr15 killed $sgpr2 killed $exec
+; GCN-NEXT:    ; kill: def $vgpr16 killed $sgpr3 killed $exec
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v1, v15, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v2, v16, vcc
 ; GCN-NEXT:    v_readfirstlane_b32 s0, v0
@@ -2770,85 +2672,59 @@ define amdgpu_ps double @dyn_extract_v7f64_s_v(<7 x double> inreg %vec, i32 %sel
 ;
 ; GFX10-LABEL: dyn_extract_v7f64_s_v:
 ; GFX10:       ; %bb.0: ; %entry
-; GFX10-NEXT:    s_mov_b32 s0, s2
-; GFX10-NEXT:    s_mov_b32 s2, s4
-; GFX10-NEXT:    s_mov_b32 s19, s5
-; GFX10-NEXT:    v_mov_b32_e32 v1, s2
-; GFX10-NEXT:    v_mov_b32_e32 v2, s19
+; GFX10-NEXT:    v_mov_b32_e32 v1, s4
+; GFX10-NEXT:    v_mov_b32_e32 v2, s5
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX10-NEXT:    s_mov_b32 s1, s3
-; GFX10-NEXT:    s_mov_b32 s4, s6
-; GFX10-NEXT:    s_mov_b32 s5, s7
-; GFX10-NEXT:    s_mov_b32 s6, s8
-; GFX10-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
+; GFX10-NEXT:    s_mov_b32 s0, s14
+; GFX10-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX10-NEXT:    s_mov_b32 s7, s9
-; GFX10-NEXT:    s_mov_b32 s8, s10
-; GFX10-NEXT:    s_mov_b32 s9, s11
-; GFX10-NEXT:    s_mov_b32 s10, s12
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    s_mov_b32 s11, s13
-; GFX10-NEXT:    s_mov_b32 s12, s14
-; GFX10-NEXT:    s_mov_b32 s13, s15
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v1, s2, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v2, s3, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_extract_v7f64_s_v:
 ; GFX11:       ; %bb.0: ; %entry
-; GFX11-NEXT:    s_mov_b32 s0, s2
-; GFX11-NEXT:    s_mov_b32 s2, s4
-; GFX11-NEXT:    s_mov_b32 s19, s5
-; GFX11-NEXT:    v_dual_mov_b32 v1, s2 :: v_dual_mov_b32 v2, s19
+; GFX11-NEXT:    v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX11-NEXT:    s_mov_b32 s1, s3
-; GFX11-NEXT:    s_mov_b32 s4, s6
-; GFX11-NEXT:    s_mov_b32 s5, s7
-; GFX11-NEXT:    s_mov_b32 s6, s8
-; GFX11-NEXT:    v_cndmask_b32_e32 v1, s0, v1, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e32 v2, s1, v2, vcc_lo
+; GFX11-NEXT:    s_mov_b32 s0, s14
+; GFX11-NEXT:    v_cndmask_b32_e32 v1, s2, v1, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v2, s3, v2, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
-; GFX11-NEXT:    s_mov_b32 s7, s9
-; GFX11-NEXT:    s_mov_b32 s8, s10
-; GFX11-NEXT:    s_mov_b32 s9, s11
-; GFX11-NEXT:    s_mov_b32 s10, s12
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s4, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s5, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX11-NEXT:    s_mov_b32 s11, s13
-; GFX11-NEXT:    s_mov_b32 s12, s14
-; GFX11-NEXT:    s_mov_b32 s13, s15
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s6, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s7, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s8, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s9, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s10, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s11, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s12, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s13, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s0, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s15, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v0
-; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s14, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s15, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v1, s2, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v2, s3, vcc_lo
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    ; return to shader part epilog

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
index 30b66e1fdd3405..676a02aaf8a19c 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.i16.ll
@@ -3296,6 +3296,10 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[6:7], s12, 5
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], s12, 6
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], s12, 7
+; GFX9-NEXT:    v_mov_b32_e32 v10, 0
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0
+; GFX9-NEXT:    v_mov_b32_e32 v12, 16
+; GFX9-NEXT:    v_mov_b32_e32 v13, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v2, v3, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v4, s[0:1]
@@ -3305,22 +3309,18 @@ define amdgpu_ps void @insertelement_v_v16i16_s_s(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v7, s[6:7]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v9, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v10, v1, s13, v0
+; GFX9-NEXT:    v_and_or_b32 v14, v1, s13, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], s12, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, v10, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v10, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v4, v10, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v10, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v6, v10, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v7, v10, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v8, v10, s[8:9]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v9, v10, s[10:11]
-; GFX9-NEXT:    v_mov_b32_e32 v8, 0
-; GFX9-NEXT:    v_mov_b32_e32 v9, 0
-; GFX9-NEXT:    v_mov_b32_e32 v10, 16
-; GFX9-NEXT:    v_mov_b32_e32 v11, 0
-; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v2, v14, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v14, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v4, v14, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v5, v14, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v6, v14, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v7, v14, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v8, v14, s[8:9]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v9, v14, s[10:11]
+; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[12:13], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_s_s:
@@ -3436,52 +3436,52 @@ define amdgpu_ps void @insertelement_s_v16i16_v_s(ptr addrspace(4) inreg %ptr, i
 ; GFX9-LABEL: insertelement_s_v16i16_v_s:
 ; GFX9:       ; %bb.0:
 ; GFX9-NEXT:    s_load_dwordx8 s[8:15], s[2:3], 0x0
-; GFX9-NEXT:    s_lshr_b32 s2, s4, 1
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 1
+; GFX9-NEXT:    s_lshr_b32 s0, s4, 1
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 1
 ; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_cselect_b32 s0, s9, s8
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 2
-; GFX9-NEXT:    s_cselect_b32 s0, s10, s0
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 3
-; GFX9-NEXT:    s_cselect_b32 s0, s11, s0
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 4
-; GFX9-NEXT:    s_cselect_b32 s0, s12, s0
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 5
-; GFX9-NEXT:    s_cselect_b32 s0, s13, s0
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 6
-; GFX9-NEXT:    s_cselect_b32 s0, s14, s0
-; GFX9-NEXT:    s_cmp_eq_u32 s2, 7
-; GFX9-NEXT:    s_cselect_b32 s0, s15, s0
-; GFX9-NEXT:    s_and_b32 s1, s4, 1
-; GFX9-NEXT:    s_lshl_b32 s1, s1, 4
-; GFX9-NEXT:    s_lshl_b32 s3, 0xffff, s1
-; GFX9-NEXT:    s_andn2_b32 s0, s0, s3
-; GFX9-NEXT:    v_mov_b32_e32 v1, s0
-; GFX9-NEXT:    v_lshl_or_b32 v8, v0, s1, v1
+; GFX9-NEXT:    s_cselect_b32 s1, s9, s8
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 2
+; GFX9-NEXT:    s_cselect_b32 s1, s10, s1
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 3
+; GFX9-NEXT:    s_cselect_b32 s1, s11, s1
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 4
+; GFX9-NEXT:    s_cselect_b32 s1, s12, s1
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 5
+; GFX9-NEXT:    s_cselect_b32 s1, s13, s1
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 6
+; GFX9-NEXT:    s_cselect_b32 s1, s14, s1
+; GFX9-NEXT:    s_cmp_eq_u32 s0, 7
+; GFX9-NEXT:    s_cselect_b32 s1, s15, s1
+; GFX9-NEXT:    s_and_b32 s2, s4, 1
+; GFX9-NEXT:    s_lshl_b32 s2, s2, 4
+; GFX9-NEXT:    s_lshl_b32 s3, 0xffff, s2
+; GFX9-NEXT:    s_andn2_b32 s1, s1, s3
+; GFX9-NEXT:    v_mov_b32_e32 v1, s1
+; GFX9-NEXT:    v_lshl_or_b32 v8, v0, s2, v1
 ; GFX9-NEXT:    v_mov_b32_e32 v0, s8
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s9
 ; GFX9-NEXT:    v_cndmask_b32_e32 v0, v0, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 1
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 1
 ; GFX9-NEXT:    v_mov_b32_e32 v2, s10
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v1, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 2
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 2
 ; GFX9-NEXT:    v_mov_b32_e32 v3, s11
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v2, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 3
-; GFX9-NEXT:    v_mov_b32_e32 v5, s13
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 3
+; GFX9-NEXT:    v_mov_b32_e32 v4, s12
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v3, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 5
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 4
+; GFX9-NEXT:    v_mov_b32_e32 v5, s13
+; GFX9-NEXT:    v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 5
 ; GFX9-NEXT:    v_mov_b32_e32 v6, s14
 ; GFX9-NEXT:    v_cndmask_b32_e32 v5, v5, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 6
-; GFX9-NEXT:    v_mov_b32_e32 v4, s12
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 6
 ; GFX9-NEXT:    v_mov_b32_e32 v7, s15
-; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, 4
 ; GFX9-NEXT:    v_cndmask_b32_e32 v6, v6, v8, vcc
-; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 7
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v4, v8, s[0:1]
+; GFX9-NEXT:    v_cmp_eq_u32_e64 vcc, s0, 7
 ; GFX9-NEXT:    v_cndmask_b32_e32 v7, v7, v8, vcc
 ; GFX9-NEXT:    v_mov_b32_e32 v8, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v9, 0
@@ -4215,7 +4215,7 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    s_and_b32 s0, s2, 0xffff
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v2, 4, v2
 ; GFX9-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GFX9-NEXT:    v_lshlrev_b32_e64 v11, v2, s0
+; GFX9-NEXT:    v_lshlrev_b32_e64 v15, v2, s0
 ; GFX9-NEXT:    v_lshlrev_b32_e32 v0, v2, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[0:1], 2, v1
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[2:3], 3, v1
@@ -4224,7 +4224,11 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v1
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v1
 ; GFX9-NEXT:    v_not_b32_e32 v0, v0
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v1
+; GFX9-NEXT:    v_mov_b32_e32 v12, 0
+; GFX9-NEXT:    v_mov_b32_e32 v13, 16
+; GFX9-NEXT:    v_mov_b32_e32 v14, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v5, s[0:1]
@@ -4234,21 +4238,17 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v8, s[6:7]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v9, s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v11, v2, v0, v11
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v11, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v11, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v11, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v11, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v11, s[8:9]
-; GFX9-NEXT:    v_mov_b32_e32 v8, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v11, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v11, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v11, s[10:11]
-; GFX9-NEXT:    v_mov_b32_e32 v9, 0
-; GFX9-NEXT:    v_mov_b32_e32 v10, 16
-; GFX9-NEXT:    v_mov_b32_e32 v11, 0
-; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
+; GFX9-NEXT:    v_and_or_b32 v15, v2, v0, v15
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v15, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v15, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v15, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v15, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v15, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v15, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v15, s[8:9]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v15, s[10:11]
+; GFX9-NEXT:    global_store_dwordx4 v[11:12], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[13:14], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_s_v:
@@ -4263,7 +4263,7 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX8-NEXT:    s_and_b32 s0, s2, 0xffff
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v2, 4, v2
 ; GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v1
-; GFX8-NEXT:    v_lshlrev_b32_e64 v11, v2, s0
+; GFX8-NEXT:    v_lshlrev_b32_e64 v15, v2, s0
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v0, v2, v0
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[0:1], 2, v1
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[2:3], 3, v1
@@ -4272,7 +4272,11 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v1
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v1
 ; GFX8-NEXT:    v_not_b32_e32 v0, v0
+; GFX8-NEXT:    v_mov_b32_e32 v11, 0
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v1
+; GFX8-NEXT:    v_mov_b32_e32 v12, 0
+; GFX8-NEXT:    v_mov_b32_e32 v13, 16
+; GFX8-NEXT:    v_mov_b32_e32 v14, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
 ; GFX8-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, v5, s[0:1]
@@ -4283,30 +4287,26 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, v9, s[8:9]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[10:11]
 ; GFX8-NEXT:    v_and_b32_e32 v0, v2, v0
-; GFX8-NEXT:    v_or_b32_e32 v11, v0, v11
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v3, v11, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v5, v11, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, v6, v11, s[2:3]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v8, v11, s[6:7]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v9, v11, s[8:9]
-; GFX8-NEXT:    v_mov_b32_e32 v8, 0
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v4, v11, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v7, v11, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v10, v11, s[10:11]
-; GFX8-NEXT:    v_mov_b32_e32 v9, 0
-; GFX8-NEXT:    v_mov_b32_e32 v10, 16
-; GFX8-NEXT:    v_mov_b32_e32 v11, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
-; GFX8-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
+; GFX8-NEXT:    v_or_b32_e32 v15, v0, v15
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v3, v15, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v4, v15, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v5, v15, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v6, v15, s[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v7, v15, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v8, v15, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v9, v15, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v10, v15, s[10:11]
+; GFX8-NEXT:    flat_store_dwordx4 v[11:12], v[0:3]
+; GFX8-NEXT:    flat_store_dwordx4 v[13:14], v[4:7]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX7-LABEL: insertelement_v_v16i16_s_v:
 ; GFX7:       ; %bb.0:
-; GFX7-NEXT:    s_mov_b32 s14, 0
-; GFX7-NEXT:    s_mov_b32 s15, 0xf000
-; GFX7-NEXT:    s_mov_b64 s[12:13], 0
-; GFX7-NEXT:    buffer_load_dwordx4 v[3:6], v[0:1], s[12:15], 0 addr64
-; GFX7-NEXT:    buffer_load_dwordx4 v[7:10], v[0:1], s[12:15], 0 addr64 offset:16
+; GFX7-NEXT:    s_mov_b32 s18, 0
+; GFX7-NEXT:    s_mov_b32 s19, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[16:17], 0
+; GFX7-NEXT:    buffer_load_dwordx4 v[3:6], v[0:1], s[16:19], 0 addr64
+; GFX7-NEXT:    buffer_load_dwordx4 v[7:10], v[0:1], s[16:19], 0 addr64 offset:16
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 1, v2
 ; GFX7-NEXT:    v_and_b32_e32 v1, 1, v2
 ; GFX7-NEXT:    s_and_b32 s0, s2, 0xffff
@@ -4322,7 +4322,8 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v0
 ; GFX7-NEXT:    v_not_b32_e32 v1, v1
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v0
-; GFX7-NEXT:    s_mov_b32 s14, -1
+; GFX7-NEXT:    s_mov_b64 s[16:17], 0
+; GFX7-NEXT:    s_mov_b32 s18, -1
 ; GFX7-NEXT:    s_waitcnt vmcnt(1)
 ; GFX7-NEXT:    v_cndmask_b32_e32 v11, v3, v4, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e64 v11, v11, v5, s[0:1]
@@ -4338,14 +4339,13 @@ define amdgpu_ps void @insertelement_v_v16i16_s_v(ptr addrspace(1) %ptr, i16 inr
 ; GFX7-NEXT:    v_cndmask_b32_e32 v1, v4, v11, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e64 v2, v5, v11, s[0:1]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v3, v6, v11, s[2:3]
-; GFX7-NEXT:    s_mov_b64 s[12:13], 0
 ; GFX7-NEXT:    v_cndmask_b32_e64 v4, v7, v11, s[4:5]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v5, v8, v11, s[6:7]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v6, v9, v11, s[8:9]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v7, v10, v11, s[10:11]
-; GFX7-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
-; GFX7-NEXT:    s_mov_b64 s[12:13], 16
-; GFX7-NEXT:    buffer_store_dwordx4 v[4:7], off, s[12:15], 0
+; GFX7-NEXT:    buffer_store_dwordx4 v[0:3], off, s[16:19], 0
+; GFX7-NEXT:    s_mov_b64 s[16:17], 16
+; GFX7-NEXT:    buffer_store_dwordx4 v[4:7], off, s[16:19], 0
 ; GFX7-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: insertelement_v_v16i16_s_v:
@@ -4470,6 +4470,10 @@ define amdgpu_ps void @insertelement_v_v16i16_v_s(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[6:7], s12, 5
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], s12, 6
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], s12, 7
+; GFX9-NEXT:    v_mov_b32_e32 v11, 0
+; GFX9-NEXT:    v_mov_b32_e32 v12, 0
+; GFX9-NEXT:    v_mov_b32_e32 v13, 16
+; GFX9-NEXT:    v_mov_b32_e32 v14, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_cndmask_b32_e32 v1, v3, v4, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v5, s[0:1]
@@ -4479,22 +4483,18 @@ define amdgpu_ps void @insertelement_v_v16i16_v_s(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v8, s[6:7]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v9, s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v1, v1, v10, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v11, v1, s13, v0
+; GFX9-NEXT:    v_and_or_b32 v15, v1, s13, v0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], s12, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v11, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v11, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v11, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v11, s[6:7]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v11, s[8:9]
-; GFX9-NEXT:    v_mov_b32_e32 v8, 0
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v11, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v11, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v11, s[10:11]
-; GFX9-NEXT:    v_mov_b32_e32 v9, 0
-; GFX9-NEXT:    v_mov_b32_e32 v10, 16
-; GFX9-NEXT:    v_mov_b32_e32 v11, 0
-; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v3, v15, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v4, v15, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v5, v15, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v6, v15, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v7, v15, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v8, v15, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v9, v15, s[8:9]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v10, v15, s[10:11]
+; GFX9-NEXT:    global_store_dwordx4 v[11:12], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[13:14], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_v_s:
@@ -4624,7 +4624,11 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v1
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v1
 ; GFX9-NEXT:    v_not_b32_e32 v0, v0
+; GFX9-NEXT:    v_mov_b32_e32 v12, 0
 ; GFX9-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v1
+; GFX9-NEXT:    v_mov_b32_e32 v13, 0
+; GFX9-NEXT:    v_mov_b32_e32 v14, 16
+; GFX9-NEXT:    v_mov_b32_e32 v15, 0
 ; GFX9-NEXT:    s_waitcnt vmcnt(1)
 ; GFX9-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
@@ -4634,21 +4638,17 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v9, s[6:7]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v10, s[8:9]
 ; GFX9-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[10:11]
-; GFX9-NEXT:    v_and_or_b32 v12, v3, v0, v2
-; GFX9-NEXT:    v_cndmask_b32_e64 v0, v4, v12, s[12:13]
-; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v12, vcc
-; GFX9-NEXT:    v_cndmask_b32_e64 v4, v8, v12, s[4:5]
-; GFX9-NEXT:    v_cndmask_b32_e64 v5, v9, v12, s[6:7]
-; GFX9-NEXT:    v_mov_b32_e32 v8, 0
-; GFX9-NEXT:    v_cndmask_b32_e64 v2, v6, v12, s[0:1]
-; GFX9-NEXT:    v_cndmask_b32_e64 v3, v7, v12, s[2:3]
-; GFX9-NEXT:    v_cndmask_b32_e64 v6, v10, v12, s[8:9]
-; GFX9-NEXT:    v_cndmask_b32_e64 v7, v11, v12, s[10:11]
-; GFX9-NEXT:    v_mov_b32_e32 v9, 0
-; GFX9-NEXT:    v_mov_b32_e32 v10, 16
-; GFX9-NEXT:    v_mov_b32_e32 v11, 0
-; GFX9-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
-; GFX9-NEXT:    global_store_dwordx4 v[10:11], v[4:7], off
+; GFX9-NEXT:    v_and_or_b32 v16, v3, v0, v2
+; GFX9-NEXT:    v_cndmask_b32_e64 v0, v4, v16, s[12:13]
+; GFX9-NEXT:    v_cndmask_b32_e32 v1, v5, v16, vcc
+; GFX9-NEXT:    v_cndmask_b32_e64 v2, v6, v16, s[0:1]
+; GFX9-NEXT:    v_cndmask_b32_e64 v3, v7, v16, s[2:3]
+; GFX9-NEXT:    v_cndmask_b32_e64 v4, v8, v16, s[4:5]
+; GFX9-NEXT:    v_cndmask_b32_e64 v5, v9, v16, s[6:7]
+; GFX9-NEXT:    v_cndmask_b32_e64 v6, v10, v16, s[8:9]
+; GFX9-NEXT:    v_cndmask_b32_e64 v7, v11, v16, s[10:11]
+; GFX9-NEXT:    global_store_dwordx4 v[12:13], v[0:3], off
+; GFX9-NEXT:    global_store_dwordx4 v[14:15], v[4:7], off
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: insertelement_v_v16i16_v_v:
@@ -4671,7 +4671,11 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v1
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v1
 ; GFX8-NEXT:    v_not_b32_e32 v0, v0
+; GFX8-NEXT:    v_mov_b32_e32 v12, 0
 ; GFX8-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v1
+; GFX8-NEXT:    v_mov_b32_e32 v13, 0
+; GFX8-NEXT:    v_mov_b32_e32 v14, 16
+; GFX8-NEXT:    v_mov_b32_e32 v15, 0
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
 ; GFX8-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX8-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
@@ -4682,30 +4686,26 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX8-NEXT:    v_cndmask_b32_e64 v3, v3, v10, s[8:9]
 ; GFX8-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[10:11]
 ; GFX8-NEXT:    v_and_b32_e32 v0, v3, v0
-; GFX8-NEXT:    v_or_b32_e32 v12, v0, v2
-; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, v12, s[12:13]
-; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v12, vcc
-; GFX8-NEXT:    v_cndmask_b32_e64 v4, v8, v12, s[4:5]
-; GFX8-NEXT:    v_cndmask_b32_e64 v5, v9, v12, s[6:7]
-; GFX8-NEXT:    v_mov_b32_e32 v8, 0
-; GFX8-NEXT:    v_cndmask_b32_e64 v2, v6, v12, s[0:1]
-; GFX8-NEXT:    v_cndmask_b32_e64 v3, v7, v12, s[2:3]
-; GFX8-NEXT:    v_cndmask_b32_e64 v6, v10, v12, s[8:9]
-; GFX8-NEXT:    v_cndmask_b32_e64 v7, v11, v12, s[10:11]
-; GFX8-NEXT:    v_mov_b32_e32 v9, 0
-; GFX8-NEXT:    v_mov_b32_e32 v10, 16
-; GFX8-NEXT:    v_mov_b32_e32 v11, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
-; GFX8-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
+; GFX8-NEXT:    v_or_b32_e32 v16, v0, v2
+; GFX8-NEXT:    v_cndmask_b32_e64 v0, v4, v16, s[12:13]
+; GFX8-NEXT:    v_cndmask_b32_e32 v1, v5, v16, vcc
+; GFX8-NEXT:    v_cndmask_b32_e64 v2, v6, v16, s[0:1]
+; GFX8-NEXT:    v_cndmask_b32_e64 v3, v7, v16, s[2:3]
+; GFX8-NEXT:    v_cndmask_b32_e64 v4, v8, v16, s[4:5]
+; GFX8-NEXT:    v_cndmask_b32_e64 v5, v9, v16, s[6:7]
+; GFX8-NEXT:    v_cndmask_b32_e64 v6, v10, v16, s[8:9]
+; GFX8-NEXT:    v_cndmask_b32_e64 v7, v11, v16, s[10:11]
+; GFX8-NEXT:    flat_store_dwordx4 v[12:13], v[0:3]
+; GFX8-NEXT:    flat_store_dwordx4 v[14:15], v[4:7]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX7-LABEL: insertelement_v_v16i16_v_v:
 ; GFX7:       ; %bb.0:
-; GFX7-NEXT:    s_mov_b32 s14, 0
-; GFX7-NEXT:    s_mov_b32 s15, 0xf000
-; GFX7-NEXT:    s_mov_b64 s[12:13], 0
-; GFX7-NEXT:    buffer_load_dwordx4 v[4:7], v[0:1], s[12:15], 0 addr64
-; GFX7-NEXT:    buffer_load_dwordx4 v[8:11], v[0:1], s[12:15], 0 addr64 offset:16
+; GFX7-NEXT:    s_mov_b32 s18, 0
+; GFX7-NEXT:    s_mov_b32 s19, 0xf000
+; GFX7-NEXT:    s_mov_b64 s[16:17], 0
+; GFX7-NEXT:    buffer_load_dwordx4 v[4:7], v[0:1], s[16:19], 0 addr64
+; GFX7-NEXT:    buffer_load_dwordx4 v[8:11], v[0:1], s[16:19], 0 addr64 offset:16
 ; GFX7-NEXT:    v_lshrrev_b32_e32 v0, 1, v3
 ; GFX7-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v0
 ; GFX7-NEXT:    v_and_b32_e32 v1, 1, v3
@@ -4721,7 +4721,8 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v0
 ; GFX7-NEXT:    v_not_b32_e32 v1, v1
 ; GFX7-NEXT:    v_cmp_eq_u32_e64 s[12:13], 0, v0
-; GFX7-NEXT:    s_mov_b32 s14, -1
+; GFX7-NEXT:    s_mov_b64 s[16:17], 0
+; GFX7-NEXT:    s_mov_b32 s18, -1
 ; GFX7-NEXT:    s_waitcnt vmcnt(1)
 ; GFX7-NEXT:    v_cndmask_b32_e32 v3, v4, v5, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e64 v3, v3, v6, s[0:1]
@@ -4737,14 +4738,13 @@ define amdgpu_ps void @insertelement_v_v16i16_v_v(ptr addrspace(1) %ptr, i16 %va
 ; GFX7-NEXT:    v_cndmask_b32_e32 v1, v5, v12, vcc
 ; GFX7-NEXT:    v_cndmask_b32_e64 v2, v6, v12, s[0:1]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v3, v7, v12, s[2:3]
-; GFX7-NEXT:    s_mov_b64 s[12:13], 0
 ; GFX7-NEXT:    v_cndmask_b32_e64 v4, v8, v12, s[4:5]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v5, v9, v12, s[6:7]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v6, v10, v12, s[8:9]
 ; GFX7-NEXT:    v_cndmask_b32_e64 v7, v11, v12, s[10:11]
-; GFX7-NEXT:    buffer_store_dwordx4 v[0:3], off, s[12:15], 0
-; GFX7-NEXT:    s_mov_b64 s[12:13], 16
-; GFX7-NEXT:    buffer_store_dwordx4 v[4:7], off, s[12:15], 0
+; GFX7-NEXT:    buffer_store_dwordx4 v[0:3], off, s[16:19], 0
+; GFX7-NEXT:    s_mov_b64 s[16:17], 16
+; GFX7-NEXT:    buffer_store_dwordx4 v[4:7], off, s[16:19], 0
 ; GFX7-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: insertelement_v_v16i16_v_v:

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
index d910058f0ebd92..3abc21f812e145 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.large.ll
@@ -9,43 +9,34 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GCN-NEXT:    s_load_dwordx4 s[0:3], s[4:5], 0x0
 ; GCN-NEXT:    v_lshlrev_b32_e32 v64, 8, v0
 ; GCN-NEXT:    s_waitcnt lgkmcnt(0)
-; GCN-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:144
 ; GCN-NEXT:    global_load_dwordx4 v[0:3], v64, s[0:1]
-; GCN-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:16
-; GCN-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:32
-; GCN-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:48
-; GCN-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:64
-; GCN-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:80
-; GCN-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:96
-; GCN-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:112
-; GCN-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:128
+; GCN-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:16
+; GCN-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:32
+; GCN-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:48
+; GCN-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:64
+; GCN-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:80
+; GCN-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:96
+; GCN-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:112
+; GCN-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:128
+; GCN-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:144
 ; GCN-NEXT:    global_load_dwordx4 v[40:43], v64, s[0:1] offset:160
 ; GCN-NEXT:    global_load_dwordx4 v[44:47], v64, s[0:1] offset:176
 ; GCN-NEXT:    global_load_dwordx4 v[48:51], v64, s[0:1] offset:192
 ; GCN-NEXT:    global_load_dwordx4 v[52:55], v64, s[0:1] offset:208
 ; GCN-NEXT:    global_load_dwordx4 v[56:59], v64, s[0:1] offset:224
 ; GCN-NEXT:    global_load_dwordx4 v[60:63], v64, s[0:1] offset:240
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    v_mov_b32_e32 v5, 0x3e7
-; GCN-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:144
-; GCN-NEXT:    s_waitcnt vmcnt(15)
+; GCN-NEXT:    s_waitcnt vmcnt(6)
+; GCN-NEXT:    v_mov_b32_e32 v37, 0x3e7
 ; GCN-NEXT:    global_store_dwordx4 v64, v[0:3], s[2:3]
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:16
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:32
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:48
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:64
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:80
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:96
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:112
-; GCN-NEXT:    s_waitcnt vmcnt(15)
-; GCN-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:128
+; GCN-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:16
+; GCN-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:32
+; GCN-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:48
+; GCN-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:64
+; GCN-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:80
+; GCN-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:96
+; GCN-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:112
+; GCN-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:128
+; GCN-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:144
 ; GCN-NEXT:    s_waitcnt vmcnt(15)
 ; GCN-NEXT:    global_store_dwordx4 v64, v[40:43], s[2:3] offset:160
 ; GCN-NEXT:    s_waitcnt vmcnt(15)
@@ -67,39 +58,45 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0xf
 ; GFX10-NEXT:    global_load_dwordx4 v[0:3], v64, s[0:1]
-; GFX10-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:16
-; GFX10-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:32
-; GFX10-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:48
-; GFX10-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:64
-; GFX10-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:80
-; GFX10-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:96
-; GFX10-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:112
-; GFX10-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:160
-; GFX10-NEXT:    global_load_dwordx4 v[40:43], v64, s[0:1] offset:176
-; GFX10-NEXT:    global_load_dwordx4 v[44:47], v64, s[0:1] offset:192
-; GFX10-NEXT:    global_load_dwordx4 v[48:51], v64, s[0:1] offset:208
-; GFX10-NEXT:    global_load_dwordx4 v[52:55], v64, s[0:1] offset:224
-; GFX10-NEXT:    global_load_dwordx4 v[56:59], v64, s[0:1] offset:240
-; GFX10-NEXT:    global_load_dwordx4 v[60:63], v64, s[0:1] offset:128
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:144
-; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_mov_b32_e32 v5, 0x3e7
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v64, s[0:1] offset:16
+; GFX10-NEXT:    global_load_dwordx4 v[8:11], v64, s[0:1] offset:32
+; GFX10-NEXT:    global_load_dwordx4 v[12:15], v64, s[0:1] offset:48
+; GFX10-NEXT:    global_load_dwordx4 v[16:19], v64, s[0:1] offset:64
+; GFX10-NEXT:    global_load_dwordx4 v[20:23], v64, s[0:1] offset:80
+; GFX10-NEXT:    global_load_dwordx4 v[24:27], v64, s[0:1] offset:96
+; GFX10-NEXT:    global_load_dwordx4 v[28:31], v64, s[0:1] offset:112
+; GFX10-NEXT:    global_load_dwordx4 v[32:35], v64, s[0:1] offset:128
+; GFX10-NEXT:    global_load_dwordx4 v[36:39], v64, s[0:1] offset:144
+; GFX10-NEXT:    global_load_dwordx4 v[40:43], v64, s[0:1] offset:160
+; GFX10-NEXT:    global_load_dwordx4 v[44:47], v64, s[0:1] offset:176
+; GFX10-NEXT:    global_load_dwordx4 v[48:51], v64, s[0:1] offset:192
+; GFX10-NEXT:    global_load_dwordx4 v[52:55], v64, s[0:1] offset:208
+; GFX10-NEXT:    global_load_dwordx4 v[56:59], v64, s[0:1] offset:224
+; GFX10-NEXT:    global_load_dwordx4 v[60:63], v64, s[0:1] offset:240
+; GFX10-NEXT:    s_waitcnt vmcnt(6)
+; GFX10-NEXT:    v_mov_b32_e32 v37, 0x3e7
 ; GFX10-NEXT:    global_store_dwordx4 v64, v[0:3], s[2:3]
-; GFX10-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:16
-; GFX10-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:32
-; GFX10-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:48
-; GFX10-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:64
-; GFX10-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:80
-; GFX10-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:96
-; GFX10-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:112
-; GFX10-NEXT:    global_store_dwordx4 v64, v[60:63], s[2:3] offset:128
-; GFX10-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:144
-; GFX10-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:160
-; GFX10-NEXT:    global_store_dwordx4 v64, v[40:43], s[2:3] offset:176
-; GFX10-NEXT:    global_store_dwordx4 v64, v[44:47], s[2:3] offset:192
-; GFX10-NEXT:    global_store_dwordx4 v64, v[48:51], s[2:3] offset:208
-; GFX10-NEXT:    global_store_dwordx4 v64, v[52:55], s[2:3] offset:224
-; GFX10-NEXT:    global_store_dwordx4 v64, v[56:59], s[2:3] offset:240
+; GFX10-NEXT:    global_store_dwordx4 v64, v[4:7], s[2:3] offset:16
+; GFX10-NEXT:    global_store_dwordx4 v64, v[8:11], s[2:3] offset:32
+; GFX10-NEXT:    global_store_dwordx4 v64, v[12:15], s[2:3] offset:48
+; GFX10-NEXT:    global_store_dwordx4 v64, v[16:19], s[2:3] offset:64
+; GFX10-NEXT:    global_store_dwordx4 v64, v[20:23], s[2:3] offset:80
+; GFX10-NEXT:    global_store_dwordx4 v64, v[24:27], s[2:3] offset:96
+; GFX10-NEXT:    global_store_dwordx4 v64, v[28:31], s[2:3] offset:112
+; GFX10-NEXT:    global_store_dwordx4 v64, v[32:35], s[2:3] offset:128
+; GFX10-NEXT:    global_store_dwordx4 v64, v[36:39], s[2:3] offset:144
+; GFX10-NEXT:    s_waitcnt vmcnt(5)
+; GFX10-NEXT:    global_store_dwordx4 v64, v[40:43], s[2:3] offset:160
+; GFX10-NEXT:    s_waitcnt vmcnt(4)
+; GFX10-NEXT:    global_store_dwordx4 v64, v[44:47], s[2:3] offset:176
+; GFX10-NEXT:    s_waitcnt vmcnt(3)
+; GFX10-NEXT:    global_store_dwordx4 v64, v[48:51], s[2:3] offset:192
+; GFX10-NEXT:    s_waitcnt vmcnt(2)
+; GFX10-NEXT:    global_store_dwordx4 v64, v[52:55], s[2:3] offset:208
+; GFX10-NEXT:    s_waitcnt vmcnt(1)
+; GFX10-NEXT:    global_store_dwordx4 v64, v[56:59], s[2:3] offset:224
+; GFX10-NEXT:    s_waitcnt vmcnt(0)
+; GFX10-NEXT:    global_store_dwordx4 v64, v[60:63], s[2:3] offset:240
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_insert_v64i32_37:
@@ -109,15 +106,15 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0xf
 ; GFX11-NEXT:    global_load_b128 v[0:3], v64, s[0:1]
-; GFX11-NEXT:    global_load_b128 v[8:11], v64, s[0:1] offset:16
-; GFX11-NEXT:    global_load_b128 v[12:15], v64, s[0:1] offset:32
-; GFX11-NEXT:    global_load_b128 v[16:19], v64, s[0:1] offset:48
-; GFX11-NEXT:    global_load_b128 v[20:23], v64, s[0:1] offset:64
-; GFX11-NEXT:    global_load_b128 v[24:27], v64, s[0:1] offset:80
-; GFX11-NEXT:    global_load_b128 v[28:31], v64, s[0:1] offset:96
-; GFX11-NEXT:    global_load_b128 v[32:35], v64, s[0:1] offset:112
-; GFX11-NEXT:    global_load_b128 v[36:39], v64, s[0:1] offset:128
-; GFX11-NEXT:    global_load_b128 v[4:7], v64, s[0:1] offset:144
+; GFX11-NEXT:    global_load_b128 v[4:7], v64, s[0:1] offset:16
+; GFX11-NEXT:    global_load_b128 v[8:11], v64, s[0:1] offset:32
+; GFX11-NEXT:    global_load_b128 v[12:15], v64, s[0:1] offset:48
+; GFX11-NEXT:    global_load_b128 v[16:19], v64, s[0:1] offset:64
+; GFX11-NEXT:    global_load_b128 v[20:23], v64, s[0:1] offset:80
+; GFX11-NEXT:    global_load_b128 v[24:27], v64, s[0:1] offset:96
+; GFX11-NEXT:    global_load_b128 v[28:31], v64, s[0:1] offset:112
+; GFX11-NEXT:    global_load_b128 v[32:35], v64, s[0:1] offset:128
+; GFX11-NEXT:    global_load_b128 v[36:39], v64, s[0:1] offset:144
 ; GFX11-NEXT:    global_load_b128 v[40:43], v64, s[0:1] offset:160
 ; GFX11-NEXT:    global_load_b128 v[44:47], v64, s[0:1] offset:176
 ; GFX11-NEXT:    global_load_b128 v[48:51], v64, s[0:1] offset:192
@@ -125,18 +122,18 @@ define amdgpu_kernel void @v_insert_v64i32_37(ptr addrspace(1) %ptr.in, ptr addr
 ; GFX11-NEXT:    global_load_b128 v[56:59], v64, s[0:1] offset:224
 ; GFX11-NEXT:    global_load_b128 v[60:63], v64, s[0:1] offset:240
 ; GFX11-NEXT:    s_waitcnt vmcnt(6)
-; GFX11-NEXT:    v_mov_b32_e32 v5, 0x3e7
+; GFX11-NEXT:    v_mov_b32_e32 v37, 0x3e7
 ; GFX11-NEXT:    s_clause 0x9
 ; GFX11-NEXT:    global_store_b128 v64, v[0:3], s[2:3]
-; GFX11-NEXT:    global_store_b128 v64, v[8:11], s[2:3] offset:16
-; GFX11-NEXT:    global_store_b128 v64, v[12:15], s[2:3] offset:32
-; GFX11-NEXT:    global_store_b128 v64, v[16:19], s[2:3] offset:48
-; GFX11-NEXT:    global_store_b128 v64, v[20:23], s[2:3] offset:64
-; GFX11-NEXT:    global_store_b128 v64, v[24:27], s[2:3] offset:80
-; GFX11-NEXT:    global_store_b128 v64, v[28:31], s[2:3] offset:96
-; GFX11-NEXT:    global_store_b128 v64, v[32:35], s[2:3] offset:112
-; GFX11-NEXT:    global_store_b128 v64, v[36:39], s[2:3] offset:128
-; GFX11-NEXT:    global_store_b128 v64, v[4:7], s[2:3] offset:144
+; GFX11-NEXT:    global_store_b128 v64, v[4:7], s[2:3] offset:16
+; GFX11-NEXT:    global_store_b128 v64, v[8:11], s[2:3] offset:32
+; GFX11-NEXT:    global_store_b128 v64, v[12:15], s[2:3] offset:48
+; GFX11-NEXT:    global_store_b128 v64, v[16:19], s[2:3] offset:64
+; GFX11-NEXT:    global_store_b128 v64, v[20:23], s[2:3] offset:80
+; GFX11-NEXT:    global_store_b128 v64, v[24:27], s[2:3] offset:96
+; GFX11-NEXT:    global_store_b128 v64, v[28:31], s[2:3] offset:112
+; GFX11-NEXT:    global_store_b128 v64, v[32:35], s[2:3] offset:128
+; GFX11-NEXT:    global_store_b128 v64, v[36:39], s[2:3] offset:144
 ; GFX11-NEXT:    s_waitcnt vmcnt(5)
 ; GFX11-NEXT:    global_store_b128 v64, v[40:43], s[2:3] offset:160
 ; GFX11-NEXT:    s_waitcnt vmcnt(4)

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
index dc9cbb498dab4d..49d9ad1c0f7974 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/insertelement.ll
@@ -715,27 +715,27 @@ define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
 ; GPRIDX-NEXT:    v_mov_b32_e32 v16, s17
 ; GPRIDX-NEXT:    v_mov_b32_e32 v17, s18
 ; GPRIDX-NEXT:    v_mov_b32_e32 v18, s19
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[16:17], 0, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 2, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 3, v2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 4, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 1, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s[4:5]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 2, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 3, v2
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 5, v2
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v2
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[14:15], 7, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v0, s[16:17]
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v1, s[16:17]
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v1, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s[8:9]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[16:17], 4, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s[16:17]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s[10:11]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v0, s[12:13]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s[14:15]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s[16:17]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s[10:11]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v16, v16, v1, s[12:13]
 ; GPRIDX-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s[14:15]
@@ -752,12 +752,12 @@ define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
 ; GFX10-LABEL: dyn_insertelement_v8f64_const_s_v_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT:    s_mov_b64 s[4:5], 1.0
 ; GFX10-NEXT:    s_mov_b32 s18, 0
 ; GFX10-NEXT:    s_mov_b32 s16, 0
 ; GFX10-NEXT:    s_mov_b32 s14, 0
 ; GFX10-NEXT:    s_mov_b32 s12, 0
 ; GFX10-NEXT:    s_mov_b32 s8, 0
-; GFX10-NEXT:    s_mov_b64 s[4:5], 1.0
 ; GFX10-NEXT:    s_mov_b32 s19, 0x40200000
 ; GFX10-NEXT:    s_mov_b32 s17, 0x401c0000
 ; GFX10-NEXT:    s_mov_b32 s15, 0x40180000
@@ -767,6 +767,7 @@ define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
 ; GFX10-NEXT:    s_mov_b64 s[6:7], 2.0
 ; GFX10-NEXT:    v_mov_b32_e32 v3, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v4, s5
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s6
 ; GFX10-NEXT:    v_mov_b32_e32 v6, s7
 ; GFX10-NEXT:    v_mov_b32_e32 v7, s8
@@ -781,30 +782,29 @@ define void @dyn_insertelement_v8f64_const_s_v_v(double %val, i32 %idx) {
 ; GFX10-NEXT:    v_mov_b32_e32 v16, s17
 ; GFX10-NEXT:    v_mov_b32_e32 v17, s18
 ; GFX10-NEXT:    v_mov_b32_e32 v18, s19
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 1, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 2, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 3, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s7, 5, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s10, 4, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s8, 6, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s9, 7, v2
 ; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s4
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s4
 ; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s10
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s7
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s10
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s7
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v0, s8
-; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s9
-; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, v1, s8
-; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s9
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 3, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s5
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s4
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s4
+; GFX10-NEXT:    v_cndmask_b32_e32 v15, v15, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v1, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[3:6], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[7:10], off
@@ -990,26 +990,26 @@ define amdgpu_ps void @dyn_insertelement_v8f64_s_s_v(<8 x double> inreg %vec, do
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s18, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s19, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 2, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s18, s0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s19, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 4, v0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 5, v0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 6, v0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 7, v0
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s18, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s19, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s18, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, s19, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, s18, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s19, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s18, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, s19, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, s18, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s19, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s18, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, s19, s4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s19, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s18, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s18, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, s19, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, s18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s19, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v0
+; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, s19, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s18, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, s19, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, s18, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s19, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[1:4], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[5:8], off
@@ -1394,6 +1394,9 @@ define amdgpu_ps void @dyn_insertelement_v8f64_s_v_v(<8 x double> inreg %vec, do
 ; GFX10-NEXT:    s_mov_b32 s12, s14
 ; GFX10-NEXT:    s_mov_b32 s14, s16
 ; GFX10-NEXT:    v_mov_b32_e32 v18, s15
+; GFX10-NEXT:    v_mov_b32_e32 v4, s1
+; GFX10-NEXT:    v_mov_b32_e32 v3, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v17, s14
 ; GFX10-NEXT:    v_mov_b32_e32 v16, s13
 ; GFX10-NEXT:    v_mov_b32_e32 v15, s12
@@ -1407,32 +1410,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_s_v_v(<8 x double> inreg %vec, do
 ; GFX10-NEXT:    v_mov_b32_e32 v7, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v6, s3
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s2
-; GFX10-NEXT:    v_mov_b32_e32 v4, s1
-; GFX10-NEXT:    v_mov_b32_e32 v3, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 3, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 2, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 4, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 5, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 6, v2
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v2
 ; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v0, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v0, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v0, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v16, v16, v1, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s5
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v17, v17, v0, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v1, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v2
+; GFX10-NEXT:    v_cndmask_b32_e64 v18, v18, v1, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v0, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v1, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v15, v15, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v16, v16, v1, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[3:6], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[7:10], off
@@ -1517,31 +1517,31 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_s_v(<8 x double> %vec, double i
 ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_s_v:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_mov_b32_e32 v17, s2
+; GPRIDX-NEXT:    v_mov_b32_e32 v18, s3
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v16
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v16
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[14:15], 2, v16
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v16
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v16
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 5, v16
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v16
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v16
-; GPRIDX-NEXT:    v_mov_b32_e32 v16, s3
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v17, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v16, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v17, s[14:15]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v17, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v17, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v17, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v17, s[12:13]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v17, s[10:11]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v16, s[14:15]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v16, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v16, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v16, s[12:13]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v16, s[10:11]
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v18, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v16
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v18, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v16
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v18, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v16
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v18, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v16
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v18, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v16
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v10, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v11, v18, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v16
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v12, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v13, v18, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v16
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v14, v14, v17, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v15, v15, v18, vcc
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1555,29 +1555,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_s_v(<8 x double> %vec, double i
 ; GFX10-LABEL: dyn_insertelement_v8f64_v_s_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v16
 ; GFX10-NEXT:    v_cndmask_b32_e64 v0, v0, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v1, v1, s3, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s3, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, s2, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, s3, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s2, s1
 ; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, s3, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s3, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, s3, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s3, s1
 ; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, s3, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s3, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, s2, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, s3, s0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, s2, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, s3, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v16
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, s2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, s3, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1591,29 +1591,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_s_v(<8 x double> %vec, double i
 ; GFX11-LABEL: dyn_insertelement_v8f64_v_s_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 7, v16
 ; GFX11-NEXT:    v_cndmask_b32_e64 v0, v0, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v1, v1, s3, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, s2, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, s3, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, s2, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, s3, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, s2, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, s3, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, s3, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, s3, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, s3, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, s2, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, s3, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, s2, vcc_lo
 ; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, s3, vcc_lo
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 7, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, s2, vcc_lo
-; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, s3, vcc_lo
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[0:3], off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[4:7], off dlc
@@ -1704,29 +1704,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v(<8 x double> %vec, double %
 ; GPRIDX-LABEL: dyn_insertelement_v8f64_v_v_v:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s[0:1]
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 2, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 5, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s[12:13]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s[10:11]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s[12:13]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s[10:11]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v10, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v11, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v14, v14, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v15, v15, v17, vcc
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1741,28 +1741,28 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v(<8 x double> %vec, double %
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -1777,27 +1777,24 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v(<8 x double> %vec, double %
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v16 :: v_dual_cndmask_b32 v1, v1, v17
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
-; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
-; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
-; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
-; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
+; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
+; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v16 :: v_dual_cndmask_b32 v5, v5, v17
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
+; GFX11-NEXT:    v_dual_cndmask_b32 v8, v8, v16 :: v_dual_cndmask_b32 v9, v9, v17
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
+; GFX11-NEXT:    v_dual_cndmask_b32 v12, v12, v16 :: v_dual_cndmask_b32 v13, v13, v17
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[0:3], off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[4:7], off dlc
@@ -2436,29 +2433,29 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, do
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_add_u32_e32 v18, 1, v18
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s[0:1]
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 2, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 5, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 7, v18
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[12:13], 6, v18
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s[12:13]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s[10:11]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s[6:7]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s[12:13]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s[10:11]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v10, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v11, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 7, v18
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v14, v14, v16, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v15, v15, v17, vcc
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GPRIDX-NEXT:    s_waitcnt vmcnt(0)
 ; GPRIDX-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -2474,28 +2471,28 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, do
 ; GFX10-NEXT:    v_add_nc_u32_e32 v18, 1, v18
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
-; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
-; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
-; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
-; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
-; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc_lo
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
+; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v16, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v13, v13, v17, vcc_lo
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[0:3], off
 ; GFX10-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX10-NEXT:    global_store_dwordx4 v[0:1], v[4:7], off
@@ -2512,27 +2509,26 @@ define amdgpu_ps void @dyn_insertelement_v8f64_v_v_v_add_1(<8 x double> %vec, do
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v18
 ; GFX11-NEXT:    v_cndmask_b32_e32 v1, v1, v17, vcc_lo
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 2, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s2, 3, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s3, 4, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s4, 5, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s5, 7, v18
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s6, 6, v18
 ; GFX11-NEXT:    v_cndmask_b32_e32 v0, v0, v16, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v18
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 7, v18
 ; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v16, s0
 ; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v17, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v16, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v17, s1
-; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s2
-; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v16, s3
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s4
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v17, s3
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s4
-; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v16, s6
-; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s5
-; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v17, s6
-; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s5
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, v5, v17, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v18
+; GFX11-NEXT:    v_cndmask_b32_e32 v4, v4, v16, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v18
+; GFX11-NEXT:    v_cndmask_b32_e64 v14, v14, v16, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v15, v15, v17, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v16, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v17, s0
+; GFX11-NEXT:    v_cndmask_b32_e32 v9, v9, v17, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v18
+; GFX11-NEXT:    v_cndmask_b32_e32 v8, v8, v16, vcc_lo
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v18
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v16, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v17, s0
+; GFX11-NEXT:    v_dual_cndmask_b32 v13, v13, v17 :: v_dual_cndmask_b32 v12, v12, v16
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[0:3], off dlc
 ; GFX11-NEXT:    s_waitcnt_vscnt null, 0x0
 ; GFX11-NEXT:    global_store_b128 v[0:1], v[4:7], off dlc
@@ -5675,10 +5671,6 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GPRIDX-NEXT:    s_mov_b32 s12, s14
 ; GPRIDX-NEXT:    s_mov_b32 s13, s15
 ; GPRIDX-NEXT:    v_mov_b32_e32 v18, s15
-; GPRIDX-NEXT:    v_mov_b32_e32 v3, s0
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
-; GPRIDX-NEXT:    v_mov_b32_e32 v4, s1
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc
 ; GPRIDX-NEXT:    v_mov_b32_e32 v17, s14
 ; GPRIDX-NEXT:    v_mov_b32_e32 v16, s13
 ; GPRIDX-NEXT:    v_mov_b32_e32 v15, s12
@@ -5692,39 +5684,43 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GPRIDX-NEXT:    v_mov_b32_e32 v7, s4
 ; GPRIDX-NEXT:    v_mov_b32_e32 v6, s3
 ; GPRIDX-NEXT:    v_mov_b32_e32 v5, s2
+; GPRIDX-NEXT:    v_mov_b32_e32 v4, s1
+; GPRIDX-NEXT:    v_mov_b32_e32 v3, s0
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 2, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 3, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 4, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 5, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], 6, v2
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[10:11], 1, v2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v5, v0, s[10:11]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v7, v0, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v9, v0, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v11, v0, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v11, v13, v0, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v0, v15, v0, s[8:9]
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s[10:11]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v1, s[0:1]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s[2:3]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v12, v12, v1, s[4:5]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v13, v14, v1, s[6:7]
+; GPRIDX-NEXT:    v_cndmask_b32_e64 v1, v16, v1, s[8:9]
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v3
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v4, v1, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v2
-; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v3
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v5, v0, vcc
-; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v3
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v6, v1, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v7, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v8, v1, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v9, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v10, v1, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v11, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v12, v1, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 5, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v10, v13, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v11, v14, v1, vcc
-; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 6, v2
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v12, v15, v0, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e32 v13, v16, v1, vcc
-; GPRIDX-NEXT:    v_readfirstlane_b32 s3, v3
-; GPRIDX-NEXT:    v_readfirstlane_b32 s4, v4
-; GPRIDX-NEXT:    v_readfirstlane_b32 s5, v5
-; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v6
-; GPRIDX-NEXT:    v_readfirstlane_b32 s7, v7
-; GPRIDX-NEXT:    v_readfirstlane_b32 s8, v8
-; GPRIDX-NEXT:    v_readfirstlane_b32 s9, v9
-; GPRIDX-NEXT:    v_readfirstlane_b32 s10, v10
-; GPRIDX-NEXT:    v_readfirstlane_b32 s11, v11
-; GPRIDX-NEXT:    v_readfirstlane_b32 s12, v12
-; GPRIDX-NEXT:    v_readfirstlane_b32 s13, v13
+; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v4
+; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
+; GPRIDX-NEXT:    v_readfirstlane_b32 s3, v6
+; GPRIDX-NEXT:    v_readfirstlane_b32 s4, v5
+; GPRIDX-NEXT:    v_readfirstlane_b32 s5, v8
+; GPRIDX-NEXT:    v_readfirstlane_b32 s6, v7
+; GPRIDX-NEXT:    v_readfirstlane_b32 s7, v10
+; GPRIDX-NEXT:    v_readfirstlane_b32 s8, v9
+; GPRIDX-NEXT:    v_readfirstlane_b32 s9, v12
+; GPRIDX-NEXT:    v_readfirstlane_b32 s10, v11
+; GPRIDX-NEXT:    v_readfirstlane_b32 s11, v13
+; GPRIDX-NEXT:    v_readfirstlane_b32 s12, v0
+; GPRIDX-NEXT:    v_readfirstlane_b32 s13, v1
 ; GPRIDX-NEXT:    ; return to shader part epilog
 ;
 ; GFX10-LABEL: dyn_insertelement_v7f64_s_v_v:
@@ -5744,13 +5740,9 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GFX10-NEXT:    s_mov_b32 s12, s14
 ; GFX10-NEXT:    s_mov_b32 s13, s15
 ; GFX10-NEXT:    v_mov_b32_e32 v18, s15
-; GFX10-NEXT:    v_mov_b32_e32 v3, s0
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
-; GFX10-NEXT:    v_mov_b32_e32 v4, s1
 ; GFX10-NEXT:    v_mov_b32_e32 v17, s14
 ; GFX10-NEXT:    v_mov_b32_e32 v16, s13
 ; GFX10-NEXT:    v_mov_b32_e32 v15, s12
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
 ; GFX10-NEXT:    v_mov_b32_e32 v14, s11
 ; GFX10-NEXT:    v_mov_b32_e32 v13, s10
 ; GFX10-NEXT:    v_mov_b32_e32 v12, s9
@@ -5761,39 +5753,43 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GFX10-NEXT:    v_mov_b32_e32 v7, s4
 ; GFX10-NEXT:    v_mov_b32_e32 v6, s3
 ; GFX10-NEXT:    v_mov_b32_e32 v5, s2
-; GFX10-NEXT:    v_readfirstlane_b32 s0, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v4, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v5, v0, vcc_lo
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v6, v1, vcc_lo
+; GFX10-NEXT:    v_mov_b32_e32 v4, s1
+; GFX10-NEXT:    v_mov_b32_e32 v3, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v2
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 6, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v1, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v7, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v8, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
-; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v9, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v10, v1, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v2
+; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v1, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
-; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v11, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v12, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
-; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v13, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v14, v1, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s10, v10
-; GFX10-NEXT:    v_readfirstlane_b32 s11, v11
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v15, v0, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v16, v1, vcc_lo
-; GFX10-NEXT:    v_readfirstlane_b32 s12, v12
-; GFX10-NEXT:    v_readfirstlane_b32 s13, v13
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v5
+; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v0, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v2, v12, v1, vcc_lo
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v6
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v13, v0, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v14, v1, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v0, v15, v0, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v1, v16, v1, s1
+; GFX10-NEXT:    v_readfirstlane_b32 s0, v3
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX10-NEXT:    v_readfirstlane_b32 s4, v7
+; GFX10-NEXT:    v_readfirstlane_b32 s5, v8
+; GFX10-NEXT:    v_readfirstlane_b32 s6, v9
+; GFX10-NEXT:    v_readfirstlane_b32 s7, v10
+; GFX10-NEXT:    v_readfirstlane_b32 s8, v11
+; GFX10-NEXT:    v_readfirstlane_b32 s9, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s10, v12
+; GFX10-NEXT:    v_readfirstlane_b32 s11, v13
+; GFX10-NEXT:    v_readfirstlane_b32 s12, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s13, v1
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_insertelement_v7f64_s_v_v:
@@ -5813,44 +5809,45 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_s_v_v(<7 x double> inreg
 ; GFX11-NEXT:    s_mov_b32 s12, s14
 ; GFX11-NEXT:    s_mov_b32 s13, s15
 ; GFX11-NEXT:    v_dual_mov_b32 v18, s15 :: v_dual_mov_b32 v17, s14
-; GFX11-NEXT:    v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v3, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX11-NEXT:    v_dual_mov_b32 v16, s13 :: v_dual_mov_b32 v15, s12
 ; GFX11-NEXT:    v_dual_mov_b32 v14, s11 :: v_dual_mov_b32 v13, s10
 ; GFX11-NEXT:    v_dual_mov_b32 v12, s9 :: v_dual_mov_b32 v11, s8
 ; GFX11-NEXT:    v_dual_mov_b32 v10, s7 :: v_dual_mov_b32 v9, s6
 ; GFX11-NEXT:    v_dual_mov_b32 v8, s5 :: v_dual_mov_b32 v7, s4
 ; GFX11-NEXT:    v_dual_mov_b32 v6, s3 :: v_dual_mov_b32 v5, s2
+; GFX11-NEXT:    v_dual_mov_b32 v4, s1 :: v_dual_mov_b32 v3, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v2
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v2
-; GFX11-NEXT:    v_dual_cndmask_b32 v18, v3, v0 :: v_dual_cndmask_b32 v17, v4, v1
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 6, v2
+; GFX11-NEXT:    v_dual_cndmask_b32 v3, v3, v0 :: v_dual_cndmask_b32 v4, v4, v1
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v2
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s6, 4, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v3, v5, v0, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v6, v1, s0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s9, 6, v2
-; GFX11-NEXT:    v_dual_cndmask_b32 v5, v7, v0 :: v_dual_cndmask_b32 v6, v8, v1
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v2
-; GFX11-NEXT:    v_readfirstlane_b32 s0, v18
-; GFX11-NEXT:    v_readfirstlane_b32 s1, v17
-; GFX11-NEXT:    v_readfirstlane_b32 s2, v3
-; GFX11-NEXT:    v_readfirstlane_b32 s3, v4
-; GFX11-NEXT:    v_dual_cndmask_b32 v7, v9, v0 :: v_dual_cndmask_b32 v8, v10, v1
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v2
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v11, v0, s6
-; GFX11-NEXT:    v_cndmask_b32_e64 v9, v12, v1, s6
-; GFX11-NEXT:    v_readfirstlane_b32 s4, v5
-; GFX11-NEXT:    v_readfirstlane_b32 s5, v6
-; GFX11-NEXT:    v_dual_cndmask_b32 v11, v13, v0 :: v_dual_cndmask_b32 v12, v14, v1
-; GFX11-NEXT:    v_cndmask_b32_e64 v14, v15, v0, s9
-; GFX11-NEXT:    v_cndmask_b32_e64 v13, v16, v1, s9
-; GFX11-NEXT:    v_readfirstlane_b32 s6, v7
-; GFX11-NEXT:    v_readfirstlane_b32 s7, v8
-; GFX11-NEXT:    v_readfirstlane_b32 s8, v10
-; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
-; GFX11-NEXT:    v_readfirstlane_b32 s10, v11
-; GFX11-NEXT:    v_readfirstlane_b32 s11, v12
-; GFX11-NEXT:    v_readfirstlane_b32 s12, v14
-; GFX11-NEXT:    v_readfirstlane_b32 s13, v13
+; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v0, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v1, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v2
+; GFX11-NEXT:    v_dual_cndmask_b32 v7, v7, v0 :: v_dual_cndmask_b32 v8, v8, v1
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v2
+; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v0, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v1, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v2
+; GFX11-NEXT:    v_readfirstlane_b32 s2, v5
+; GFX11-NEXT:    v_dual_cndmask_b32 v11, v11, v0 :: v_dual_cndmask_b32 v2, v12, v1
+; GFX11-NEXT:    v_readfirstlane_b32 s3, v6
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, v13, v0, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, v14, v1, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v0, v15, v0, s1
+; GFX11-NEXT:    v_cndmask_b32_e64 v1, v16, v1, s1
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v3
+; GFX11-NEXT:    v_readfirstlane_b32 s1, v4
+; GFX11-NEXT:    v_readfirstlane_b32 s4, v7
+; GFX11-NEXT:    v_readfirstlane_b32 s5, v8
+; GFX11-NEXT:    v_readfirstlane_b32 s6, v9
+; GFX11-NEXT:    v_readfirstlane_b32 s7, v10
+; GFX11-NEXT:    v_readfirstlane_b32 s8, v11
+; GFX11-NEXT:    v_readfirstlane_b32 s9, v2
+; GFX11-NEXT:    v_readfirstlane_b32 s10, v12
+; GFX11-NEXT:    v_readfirstlane_b32 s11, v13
+; GFX11-NEXT:    v_readfirstlane_b32 s12, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s13, v1
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <7 x double> %vec, double %val, i32 %idx
@@ -5951,38 +5948,38 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_v_v_v(<7 x double> %vec,
 ; GFX10-LABEL: dyn_insertelement_v7f64_v_v_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 6, v16
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v14, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v15, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
-; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v15, vcc_lo
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v14, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v15, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v12, v12, v14, s1
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v14, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v15, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v14, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v15, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
+; GFX10-NEXT:    v_cndmask_b32_e64 v13, v13, v15, s1
+; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v14, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v15, vcc_lo
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX10-NEXT:    v_cndmask_b32_e64 v10, v10, v14, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v11, v11, v15, s0
+; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v15, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
-; GFX10-NEXT:    v_cndmask_b32_e32 v8, v8, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v9, v9, v15, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 5, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
-; GFX10-NEXT:    v_cndmask_b32_e32 v10, v10, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v11, v11, v15, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 6, v16
 ; GFX10-NEXT:    v_readfirstlane_b32 s10, v10
 ; GFX10-NEXT:    v_readfirstlane_b32 s11, v11
-; GFX10-NEXT:    v_cndmask_b32_e32 v12, v12, v14, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v13, v13, v15, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s12, v12
 ; GFX10-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX10-NEXT:    ; return to shader part epilog
@@ -5990,35 +5987,37 @@ define amdgpu_ps <7 x double> @dyn_insertelement_v7f64_v_v_v(<7 x double> %vec,
 ; GFX11-LABEL: dyn_insertelement_v7f64_v_v_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s9, 5, v16
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s10, 6, v16
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v16
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 6, v16
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v14 :: v_dual_cndmask_b32 v1, v1, v15
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v16
-; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v14, s9
-; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v15, s9
-; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v14, s10
-; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v15, s10
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v14 :: v_dual_cndmask_b32 v3, v3, v15
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v16
-; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v14, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v15, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v12, v12, v14, s1
+; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v14 :: v_dual_cndmask_b32 v5, v5, v15
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v14, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v15, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 5, v16
+; GFX11-NEXT:    v_cndmask_b32_e64 v13, v13, v15, s1
+; GFX11-NEXT:    v_dual_cndmask_b32 v8, v8, v14 :: v_dual_cndmask_b32 v9, v9, v15
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
+; GFX11-NEXT:    v_cndmask_b32_e64 v10, v10, v14, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v11, v11, v15, s0
+; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
-; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v14 :: v_dual_cndmask_b32 v5, v5, v15
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v16
-; GFX11-NEXT:    v_readfirstlane_b32 s10, v10
-; GFX11-NEXT:    v_readfirstlane_b32 s11, v11
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX11-NEXT:    v_readfirstlane_b32 s5, v5
-; GFX11-NEXT:    v_dual_cndmask_b32 v6, v6, v14 :: v_dual_cndmask_b32 v7, v7, v15
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 4, v16
-; GFX11-NEXT:    v_readfirstlane_b32 s12, v12
-; GFX11-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
-; GFX11-NEXT:    v_dual_cndmask_b32 v8, v8, v14 :: v_dual_cndmask_b32 v9, v9, v15
 ; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
+; GFX11-NEXT:    v_readfirstlane_b32 s10, v10
+; GFX11-NEXT:    v_readfirstlane_b32 s11, v11
+; GFX11-NEXT:    v_readfirstlane_b32 s12, v12
+; GFX11-NEXT:    v_readfirstlane_b32 s13, v13
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:
   %insert = insertelement <7 x double> %vec, double %val, i32 %idx
@@ -6352,20 +6351,20 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_s(<5 x double> %vec,
 ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_s:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 0
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], s2, 1
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[8:9], s2, 2
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], s2, 3
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], s2, 4
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[6:7]
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s[8:9]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s[6:7]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 1
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 2
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 3
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e64 vcc, s2, 4
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v11, vcc
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v1
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
@@ -6381,55 +6380,56 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_s(<5 x double> %vec,
 ; GFX10-LABEL: dyn_insertelement_v5f64_v_v_s:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 0
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s2, 4
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s2, 1
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, s2, 4
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 1
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s0
-; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
 ; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 2
-; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
-; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
-; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, s2, 3
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
 ; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 3
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
+; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
+; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
+; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
+; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_insertelement_v5f64_v_v_s:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 0
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, s2, 2
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, s2, 1
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, s2, 4
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v10 :: v_dual_cndmask_b32 v1, v1, v11
-; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 1
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 2
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, s2, 3
 ; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11
-; GFX11-NEXT:    v_cmp_eq_u32_e64 vcc_lo, s2, 3
+; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v10 :: v_dual_cndmask_b32 v5, v5, v11
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
-; GFX11-NEXT:    v_dual_cndmask_b32 v6, v6, v10 :: v_dual_cndmask_b32 v7, v7, v11
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX11-NEXT:    v_readfirstlane_b32 s5, v5
-; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
+; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:
@@ -6441,20 +6441,20 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_v(<5 x double> %vec,
 ; GPRIDX-LABEL: dyn_insertelement_v5f64_v_v_v:
 ; GPRIDX:       ; %bb.0: ; %entry
 ; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v12
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[0:1], 1, v12
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[2:3], 2, v12
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[4:5], 3, v12
-; GPRIDX-NEXT:    v_cmp_eq_u32_e64 s[6:7], 4, v12
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s[6:7]
 ; GPRIDX-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s[0:1]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s[2:3]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s[4:5]
-; GPRIDX-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s[6:7]
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 1, v12
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 2, v12
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 3, v12
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc
+; GPRIDX-NEXT:    v_cmp_eq_u32_e32 vcc, 4, v12
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v8, v8, v10, vcc
+; GPRIDX-NEXT:    v_cndmask_b32_e32 v9, v9, v11, vcc
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s0, v0
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s1, v1
 ; GPRIDX-NEXT:    v_readfirstlane_b32 s2, v2
@@ -6470,55 +6470,56 @@ define amdgpu_ps <5 x double> @dyn_insertelement_v5f64_v_v_v(<5 x double> %vec,
 ; GFX10-LABEL: dyn_insertelement_v5f64_v_v_v:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v12
-; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 4, v12
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 1, v12
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s1, 4, v12
 ; GFX10-NEXT:    v_cndmask_b32_e32 v0, v0, v10, vcc_lo
 ; GFX10-NEXT:    v_cndmask_b32_e32 v1, v1, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
-; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s0
-; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v12
+; GFX10-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
+; GFX10-NEXT:    v_cmp_eq_u32_e64 s0, 3, v12
+; GFX10-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
+; GFX10-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
+; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
+; GFX10-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
 ; GFX10-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s1, v1
-; GFX10-NEXT:    v_cndmask_b32_e32 v2, v2, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v3, v3, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v12
-; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
-; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX10-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX10-NEXT:    v_readfirstlane_b32 s3, v3
-; GFX10-NEXT:    v_cndmask_b32_e32 v4, v4, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v5, v11, vcc_lo
-; GFX10-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v12
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX10-NEXT:    v_readfirstlane_b32 s5, v5
-; GFX10-NEXT:    v_cndmask_b32_e32 v6, v6, v10, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v7, v7, v11, vcc_lo
 ; GFX10-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX10-NEXT:    v_readfirstlane_b32 s7, v7
+; GFX10-NEXT:    v_readfirstlane_b32 s8, v8
+; GFX10-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX10-NEXT:    ; return to shader part epilog
 ;
 ; GFX11-LABEL: dyn_insertelement_v5f64_v_v_v:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v12
-; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 2, v12
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 1, v12
 ; GFX11-NEXT:    v_cmp_eq_u32_e64 s1, 4, v12
 ; GFX11-NEXT:    v_dual_cndmask_b32 v0, v0, v10 :: v_dual_cndmask_b32 v1, v1, v11
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 1, v12
-; GFX11-NEXT:    v_cndmask_b32_e64 v4, v4, v10, s0
-; GFX11-NEXT:    v_cndmask_b32_e64 v5, v5, v11, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v2, v2, v10, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 2, v12
+; GFX11-NEXT:    v_cndmask_b32_e64 v3, v3, v11, s0
+; GFX11-NEXT:    v_cmp_eq_u32_e64 s0, 3, v12
 ; GFX11-NEXT:    v_cndmask_b32_e64 v8, v8, v10, s1
 ; GFX11-NEXT:    v_cndmask_b32_e64 v9, v9, v11, s1
-; GFX11-NEXT:    v_dual_cndmask_b32 v2, v2, v10 :: v_dual_cndmask_b32 v3, v3, v11
-; GFX11-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 3, v12
+; GFX11-NEXT:    v_dual_cndmask_b32 v4, v4, v10 :: v_dual_cndmask_b32 v5, v5, v11
+; GFX11-NEXT:    v_cndmask_b32_e64 v6, v6, v10, s0
+; GFX11-NEXT:    v_cndmask_b32_e64 v7, v7, v11, s0
 ; GFX11-NEXT:    v_readfirstlane_b32 s0, v0
 ; GFX11-NEXT:    v_readfirstlane_b32 s1, v1
 ; GFX11-NEXT:    v_readfirstlane_b32 s2, v2
 ; GFX11-NEXT:    v_readfirstlane_b32 s3, v3
-; GFX11-NEXT:    v_dual_cndmask_b32 v6, v6, v10 :: v_dual_cndmask_b32 v7, v7, v11
 ; GFX11-NEXT:    v_readfirstlane_b32 s4, v4
 ; GFX11-NEXT:    v_readfirstlane_b32 s5, v5
-; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s6, v6
 ; GFX11-NEXT:    v_readfirstlane_b32 s7, v7
+; GFX11-NEXT:    v_readfirstlane_b32 s8, v8
 ; GFX11-NEXT:    v_readfirstlane_b32 s9, v9
 ; GFX11-NEXT:    ; return to shader part epilog
 entry:

diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
index bf5843ea8047d5..60f61a67ccf0be 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
@@ -590,35 +590,35 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX908-NEXT:    v_readfirstlane_b32 s9, v3
 ; GFX908-NEXT:    s_add_u32 s5, s5, 1
 ; GFX908-NEXT:    s_addc_u32 s9, s9, 0
-; GFX908-NEXT:    s_mul_hi_u32 s21, s2, s5
-; GFX908-NEXT:    s_mul_i32 s22, s3, s5
-; GFX908-NEXT:    s_mul_i32 s20, s2, s5
-; GFX908-NEXT:    s_mul_i32 s5, s2, s9
-; GFX908-NEXT:    s_add_i32 s5, s21, s5
-; GFX908-NEXT:    s_add_i32 s5, s5, s22
+; GFX908-NEXT:    s_mul_hi_u32 s20, s2, s5
+; GFX908-NEXT:    s_mul_i32 s9, s2, s9
+; GFX908-NEXT:    s_mul_i32 s21, s3, s5
+; GFX908-NEXT:    s_add_i32 s9, s20, s9
+; GFX908-NEXT:    s_mul_i32 s5, s2, s5
+; GFX908-NEXT:    s_add_i32 s9, s9, s21
 ; GFX908-NEXT:    s_branch .LBB3_5
 ; GFX908-NEXT:  .LBB3_4: ; %bb58
 ; GFX908-NEXT:    ; in Loop: Header=BB3_5 Depth=2
 ; GFX908-NEXT:    v_add_co_u32_sdwa v2, vcc, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
 ; GFX908-NEXT:    v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
 ; GFX908-NEXT:    s_add_u32 s18, s18, s14
-; GFX908-NEXT:    v_cmp_lt_i64_e64 s[24:25], -1, v[2:3]
+; GFX908-NEXT:    v_cmp_lt_i64_e64 s[22:23], -1, v[2:3]
 ; GFX908-NEXT:    s_addc_u32 s19, s19, s15
-; GFX908-NEXT:    s_mov_b64 s[22:23], 0
-; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[24:25]
+; GFX908-NEXT:    s_mov_b64 s[20:21], 0
+; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
 ; GFX908-NEXT:    s_cbranch_vccz .LBB3_9
 ; GFX908-NEXT:  .LBB3_5: ; %bb16
 ; GFX908-NEXT:    ; Parent Loop BB3_2 Depth=1
 ; GFX908-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX908-NEXT:    s_add_u32 s22, s18, s20
-; GFX908-NEXT:    s_addc_u32 s23, s19, s5
-; GFX908-NEXT:    global_load_dword v21, v19, s[22:23] offset:-12 glc
+; GFX908-NEXT:    s_add_u32 s20, s18, s5
+; GFX908-NEXT:    s_addc_u32 s21, s19, s9
+; GFX908-NEXT:    global_load_dword v21, v19, s[20:21] offset:-12 glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
-; GFX908-NEXT:    global_load_dword v20, v19, s[22:23] offset:-8 glc
+; GFX908-NEXT:    global_load_dword v20, v19, s[20:21] offset:-8 glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
-; GFX908-NEXT:    global_load_dword v12, v19, s[22:23] offset:-4 glc
+; GFX908-NEXT:    global_load_dword v12, v19, s[20:21] offset:-4 glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
-; GFX908-NEXT:    global_load_dword v12, v19, s[22:23] glc
+; GFX908-NEXT:    global_load_dword v12, v19, s[20:21] glc
 ; GFX908-NEXT:    s_waitcnt vmcnt(0)
 ; GFX908-NEXT:    ds_read_b64 v[12:13], v19
 ; GFX908-NEXT:    ds_read_b64 v[14:15], v0
@@ -647,11 +647,11 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX908-NEXT:    v_add_f32_e32 v7, v7, v15
 ; GFX908-NEXT:    v_add_f32_e32 v10, v10, v12
 ; GFX908-NEXT:    v_add_f32_e32 v11, v11, v13
-; GFX908-NEXT:    s_mov_b64 s[22:23], -1
+; GFX908-NEXT:    s_mov_b64 s[20:21], -1
 ; GFX908-NEXT:    s_branch .LBB3_4
 ; GFX908-NEXT:  .LBB3_7: ; in Loop: Header=BB3_5 Depth=2
-; GFX908-NEXT:    s_mov_b64 s[22:23], s[16:17]
-; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
+; GFX908-NEXT:    s_mov_b64 s[20:21], s[16:17]
+; GFX908-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
 ; GFX908-NEXT:    s_cbranch_vccz .LBB3_4
 ; GFX908-NEXT:  ; %bb.8: ; in Loop: Header=BB3_2 Depth=1
 ; GFX908-NEXT:    ; implicit-def: $vgpr10_vgpr11
@@ -662,7 +662,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX908-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GFX908-NEXT:  .LBB3_9: ; %loop.exit.guard
 ; GFX908-NEXT:    ; in Loop: Header=BB3_2 Depth=1
-; GFX908-NEXT:    s_xor_b64 s[16:17], s[22:23], -1
+; GFX908-NEXT:    s_xor_b64 s[16:17], s[20:21], -1
 ; GFX908-NEXT:  .LBB3_10: ; %Flow19
 ; GFX908-NEXT:    ; in Loop: Header=BB3_2 Depth=1
 ; GFX908-NEXT:    s_mov_b64 s[0:1], -1
@@ -753,12 +753,12 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX90A-NEXT:    v_readfirstlane_b32 s9, v5
 ; GFX90A-NEXT:    s_add_u32 s5, s5, 1
 ; GFX90A-NEXT:    s_addc_u32 s9, s9, 0
-; GFX90A-NEXT:    s_mul_hi_u32 s21, s2, s5
-; GFX90A-NEXT:    s_mul_i32 s22, s3, s5
-; GFX90A-NEXT:    s_mul_i32 s20, s2, s5
-; GFX90A-NEXT:    s_mul_i32 s5, s2, s9
-; GFX90A-NEXT:    s_add_i32 s5, s21, s5
-; GFX90A-NEXT:    s_add_i32 s5, s5, s22
+; GFX90A-NEXT:    s_mul_hi_u32 s20, s2, s5
+; GFX90A-NEXT:    s_mul_i32 s9, s2, s9
+; GFX90A-NEXT:    s_mul_i32 s21, s3, s5
+; GFX90A-NEXT:    s_add_i32 s9, s20, s9
+; GFX90A-NEXT:    s_mul_i32 s5, s2, s5
+; GFX90A-NEXT:    s_add_i32 s9, s9, s21
 ; GFX90A-NEXT:    s_branch .LBB3_5
 ; GFX90A-NEXT:  .LBB3_4: ; %bb58
 ; GFX90A-NEXT:    ; in Loop: Header=BB3_5 Depth=2
@@ -766,27 +766,27 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX90A-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
 ; GFX90A-NEXT:    s_add_u32 s18, s18, s14
 ; GFX90A-NEXT:    s_addc_u32 s19, s19, s15
-; GFX90A-NEXT:    v_cmp_lt_i64_e64 s[24:25], -1, v[4:5]
-; GFX90A-NEXT:    s_mov_b64 s[22:23], 0
-; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[24:25]
+; GFX90A-NEXT:    v_cmp_lt_i64_e64 s[22:23], -1, v[4:5]
+; GFX90A-NEXT:    s_mov_b64 s[20:21], 0
+; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
 ; GFX90A-NEXT:    s_cbranch_vccz .LBB3_9
 ; GFX90A-NEXT:  .LBB3_5: ; %bb16
 ; GFX90A-NEXT:    ; Parent Loop BB3_2 Depth=1
 ; GFX90A-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX90A-NEXT:    s_add_u32 s22, s18, s20
-; GFX90A-NEXT:    s_addc_u32 s23, s19, s5
-; GFX90A-NEXT:    global_load_dword v21, v19, s[22:23] offset:-12 glc
+; GFX90A-NEXT:    s_add_u32 s20, s18, s5
+; GFX90A-NEXT:    s_addc_u32 s21, s19, s9
+; GFX90A-NEXT:    global_load_dword v21, v19, s[20:21] offset:-12 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    global_load_dword v20, v19, s[22:23] offset:-8 glc
+; GFX90A-NEXT:    global_load_dword v20, v19, s[20:21] offset:-8 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    global_load_dword v14, v19, s[22:23] offset:-4 glc
+; GFX90A-NEXT:    global_load_dword v14, v19, s[20:21] offset:-4 glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
-; GFX90A-NEXT:    global_load_dword v14, v19, s[22:23] glc
+; GFX90A-NEXT:    global_load_dword v14, v19, s[20:21] glc
 ; GFX90A-NEXT:    s_waitcnt vmcnt(0)
 ; GFX90A-NEXT:    ds_read_b64 v[14:15], v19
 ; GFX90A-NEXT:    ds_read_b64 v[16:17], v0
 ; GFX90A-NEXT:    s_and_b64 vcc, exec, s[0:1]
-; GFX90A-NEXT:    ; kill: killed $sgpr22 killed $sgpr23
+; GFX90A-NEXT:    ; kill: killed $sgpr20 killed $sgpr21
 ; GFX90A-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX90A-NEXT:    s_cbranch_vccnz .LBB3_7
 ; GFX90A-NEXT:  ; %bb.6: ; %bb51
@@ -803,11 +803,11 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX90A-NEXT:    v_pk_add_f32 v[10:11], v[10:11], v[26:27]
 ; GFX90A-NEXT:    v_pk_add_f32 v[8:9], v[8:9], v[16:17]
 ; GFX90A-NEXT:    v_pk_add_f32 v[12:13], v[12:13], v[14:15]
-; GFX90A-NEXT:    s_mov_b64 s[22:23], -1
+; GFX90A-NEXT:    s_mov_b64 s[20:21], -1
 ; GFX90A-NEXT:    s_branch .LBB3_4
 ; GFX90A-NEXT:  .LBB3_7: ; in Loop: Header=BB3_5 Depth=2
-; GFX90A-NEXT:    s_mov_b64 s[22:23], s[16:17]
-; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[22:23]
+; GFX90A-NEXT:    s_mov_b64 s[20:21], s[16:17]
+; GFX90A-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
 ; GFX90A-NEXT:    s_cbranch_vccz .LBB3_4
 ; GFX90A-NEXT:  ; %bb.8: ; in Loop: Header=BB3_2 Depth=1
 ; GFX90A-NEXT:    ; implicit-def: $vgpr12_vgpr13
@@ -818,7 +818,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
 ; GFX90A-NEXT:    ; implicit-def: $sgpr18_sgpr19
 ; GFX90A-NEXT:  .LBB3_9: ; %loop.exit.guard
 ; GFX90A-NEXT:    ; in Loop: Header=BB3_2 Depth=1
-; GFX90A-NEXT:    s_xor_b64 s[16:17], s[22:23], -1
+; GFX90A-NEXT:    s_xor_b64 s[16:17], s[20:21], -1
 ; GFX90A-NEXT:  .LBB3_10: ; %Flow19
 ; GFX90A-NEXT:    ; in Loop: Header=BB3_2 Depth=1
 ; GFX90A-NEXT:    s_mov_b64 s[0:1], -1

diff --git a/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir b/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
index 263d68294cd058..23590cad83271c 100644
--- a/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalesce-identity-copies-undef-subregs.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-coalescing -verify-machineinstrs -start-before=register-coalescer -stop-after=machine-scheduler -o - %s | FileCheck %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-coalescing -verify-machineinstrs -amdgpu-enable-rewrite-partial-reg-uses=false -start-before=register-coalescer -stop-after=machine-scheduler -o - %s | FileCheck %s
 
 # Tests that break due to the handling of partially undef registers
 # when whole register identity copies are erased.

diff --git a/llvm/test/CodeGen/AMDGPU/dead-lane.mir b/llvm/test/CodeGen/AMDGPU/dead-lane.mir
index a18e647ad485a4..66944983c20c88 100644
--- a/llvm/test/CodeGen/AMDGPU/dead-lane.mir
+++ b/llvm/test/CodeGen/AMDGPU/dead-lane.mir
@@ -3,8 +3,8 @@
 
 # GCN-LABEL: name: dead_lane
 # GCN:      bb.0:
-# GCN-NEXT: undef %3.sub0:vreg_64 = nofpexcept V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %3.sub0, implicit $mode, implicit $exec
-# GCN-NEXT: FLAT_STORE_DWORD undef %4:vreg_64, %3.sub0,
+# GCN-NEXT: %5:vgpr_32 = nofpexcept V_MAC_F32_e32 undef %1:vgpr_32, undef %1:vgpr_32, undef %5, implicit $mode, implicit $exec
+# GCN-NEXT: FLAT_STORE_DWORD undef %4:vreg_64, %5,
 ---
 name:            dead_lane
 tracksRegLiveness: true

diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
index 5a827584aa1062..c2c1af3143b43e 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_i64_system.ll
@@ -2652,15 +2652,15 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-LABEL: global_atomic_nand_i64_ret:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v4, v3
-; SI-NEXT:    v_mov_b32_e32 v5, v2
-; SI-NEXT:    v_mov_b32_e32 v7, v1
-; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    v_mov_b32_e32 v6, v3
+; SI-NEXT:    v_mov_b32_e32 v7, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_mov_b32 s6, 0
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB42_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -2668,8 +2668,8 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    v_mov_b32_e32 v11, v1
 ; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v0, v11, v4
-; SI-NEXT:    v_and_b32_e32 v1, v10, v5
+; SI-NEXT:    v_and_b32_e32 v0, v11, v6
+; SI-NEXT:    v_and_b32_e32 v1, v10, v7
 ; SI-NEXT:    v_not_b32_e32 v9, v0
 ; SI-NEXT:    v_not_b32_e32 v8, v1
 ; SI-NEXT:    v_mov_b32_e32 v0, v8
@@ -2677,7 +2677,7 @@ define i64 @global_atomic_nand_i64_ret(ptr addrspace(1) %ptr, i64 %in) {
 ; SI-NEXT:    v_mov_b32_e32 v2, v10
 ; SI-NEXT:    v_mov_b32_e32 v3, v11
 ; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 glc
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
@@ -2752,15 +2752,15 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-LABEL: global_atomic_nand_i64_ret_offset:
 ; SI:       ; %bb.0:
 ; SI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT:    v_mov_b32_e32 v4, v3
-; SI-NEXT:    v_mov_b32_e32 v5, v2
-; SI-NEXT:    v_mov_b32_e32 v7, v1
-; SI-NEXT:    v_mov_b32_e32 v6, v0
+; SI-NEXT:    v_mov_b32_e32 v6, v3
+; SI-NEXT:    v_mov_b32_e32 v7, v2
+; SI-NEXT:    v_mov_b32_e32 v5, v1
+; SI-NEXT:    v_mov_b32_e32 v4, v0
 ; SI-NEXT:    s_mov_b32 s6, 0
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s4, s6
 ; SI-NEXT:    s_mov_b32 s5, s6
-; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[6:7], s[4:7], 0 addr64 offset:32
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], v[4:5], s[4:7], 0 addr64 offset:32
 ; SI-NEXT:    s_mov_b64 s[8:9], 0
 ; SI-NEXT:  .LBB43_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
@@ -2768,8 +2768,8 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    v_mov_b32_e32 v11, v1
 ; SI-NEXT:    v_mov_b32_e32 v10, v0
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v0, v11, v4
-; SI-NEXT:    v_and_b32_e32 v1, v10, v5
+; SI-NEXT:    v_and_b32_e32 v0, v11, v6
+; SI-NEXT:    v_and_b32_e32 v1, v10, v7
 ; SI-NEXT:    v_not_b32_e32 v9, v0
 ; SI-NEXT:    v_not_b32_e32 v8, v1
 ; SI-NEXT:    v_mov_b32_e32 v0, v8
@@ -2777,7 +2777,7 @@ define i64 @global_atomic_nand_i64_ret_offset(ptr addrspace(1) %out, i64 %in) {
 ; SI-NEXT:    v_mov_b32_e32 v2, v10
 ; SI-NEXT:    v_mov_b32_e32 v3, v11
 ; SI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
-; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[6:7], s[4:7], 0 addr64 offset:32 glc
+; SI-NEXT:    buffer_atomic_cmpswap_x2 v[0:3], v[4:5], s[4:7], 0 addr64 offset:32 glc
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    buffer_wbinvl1
 ; SI-NEXT:    v_cmp_eq_u64_e32 vcc, v[0:1], v[10:11]
@@ -2859,8 +2859,8 @@ define amdgpu_gfx void @global_atomic_nand_i64_noret_scalar(ptr addrspace(1) inr
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_writelane_b32 v0, s6, 0
 ; SI-NEXT:    v_writelane_b32 v0, s7, 1
-; SI-NEXT:    s_mov_b32 s35, s7
-; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
@@ -2868,9 +2868,9 @@ define amdgpu_gfx void @global_atomic_nand_i64_noret_scalar(ptr addrspace(1) inr
 ; SI-NEXT:  .LBB44_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, s35, v4
+; SI-NEXT:    v_and_b32_e32 v1, s34, v4
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v5, s34, v3
+; SI-NEXT:    v_and_b32_e32 v5, s35, v3
 ; SI-NEXT:    v_not_b32_e32 v2, v1
 ; SI-NEXT:    v_not_b32_e32 v1, v5
 ; SI-NEXT:    v_mov_b32_e32 v8, v4
@@ -2967,8 +2967,8 @@ define amdgpu_gfx void @global_atomic_nand_i64_noret_offset_scalar(ptr addrspace
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_writelane_b32 v0, s6, 0
 ; SI-NEXT:    v_writelane_b32 v0, s7, 1
-; SI-NEXT:    s_mov_b32 s35, s7
-; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
@@ -2976,9 +2976,9 @@ define amdgpu_gfx void @global_atomic_nand_i64_noret_offset_scalar(ptr addrspace
 ; SI-NEXT:  .LBB45_1: ; %atomicrmw.start
 ; SI-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; SI-NEXT:    s_waitcnt vmcnt(0)
-; SI-NEXT:    v_and_b32_e32 v1, s35, v4
+; SI-NEXT:    v_and_b32_e32 v1, s34, v4
 ; SI-NEXT:    s_waitcnt expcnt(0)
-; SI-NEXT:    v_and_b32_e32 v5, s34, v3
+; SI-NEXT:    v_and_b32_e32 v5, s35, v3
 ; SI-NEXT:    v_not_b32_e32 v2, v1
 ; SI-NEXT:    v_not_b32_e32 v1, v5
 ; SI-NEXT:    v_mov_b32_e32 v8, v4
@@ -3076,8 +3076,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_writelane_b32 v2, s6, 0
 ; SI-NEXT:    v_writelane_b32 v2, s7, 1
-; SI-NEXT:    s_mov_b32 s35, s7
-; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0
@@ -3087,8 +3087,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_scalar(ptr addrspace(1) inreg
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v8, v4
 ; SI-NEXT:    v_mov_b32_e32 v7, v3
-; SI-NEXT:    v_and_b32_e32 v0, s35, v8
-; SI-NEXT:    v_and_b32_e32 v1, s34, v7
+; SI-NEXT:    v_and_b32_e32 v0, s34, v8
+; SI-NEXT:    v_and_b32_e32 v1, s35, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_not_b32_e32 v6, v0
 ; SI-NEXT:    v_not_b32_e32 v5, v1
@@ -3186,8 +3186,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_writelane_b32 v2, s6, 0
 ; SI-NEXT:    v_writelane_b32 v2, s7, 1
-; SI-NEXT:    s_mov_b32 s35, s7
-; SI-NEXT:    s_mov_b32 s34, s6
+; SI-NEXT:    s_mov_b32 s34, s7
+; SI-NEXT:    s_mov_b32 s35, s6
 ; SI-NEXT:    s_mov_b32 s7, 0xf000
 ; SI-NEXT:    s_mov_b32 s6, -1
 ; SI-NEXT:    buffer_load_dwordx2 v[3:4], off, s[4:7], 0 offset:32
@@ -3197,8 +3197,8 @@ define amdgpu_gfx i64 @global_atomic_nand_i64_ret_offset_scalar(ptr addrspace(1)
 ; SI-NEXT:    s_waitcnt vmcnt(0)
 ; SI-NEXT:    v_mov_b32_e32 v8, v4
 ; SI-NEXT:    v_mov_b32_e32 v7, v3
-; SI-NEXT:    v_and_b32_e32 v0, s35, v8
-; SI-NEXT:    v_and_b32_e32 v1, s34, v7
+; SI-NEXT:    v_and_b32_e32 v0, s34, v8
+; SI-NEXT:    v_and_b32_e32 v1, s35, v7
 ; SI-NEXT:    s_waitcnt expcnt(0)
 ; SI-NEXT:    v_not_b32_e32 v6, v0
 ; SI-NEXT:    v_not_b32_e32 v5, v1

diff --git a/llvm/test/CodeGen/AMDGPU/idiv-licm.ll b/llvm/test/CodeGen/AMDGPU/idiv-licm.ll
index 8382624af5d749..637fcd0a1e6b6f 100644
--- a/llvm/test/CodeGen/AMDGPU/idiv-licm.ll
+++ b/llvm/test/CodeGen/AMDGPU/idiv-licm.ll
@@ -6,41 +6,41 @@
 define amdgpu_kernel void @udiv32_invariant_denom(ptr addrspace(1) nocapture %arg, i32 %arg1) {
 ; GFX9-LABEL: udiv32_invariant_denom:
 ; GFX9:       ; %bb.0: ; %bb
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b32 s8, 0
+; GFX9-NEXT:    s_load_dword s6, s[0:1], 0x2c
+; GFX9-NEXT:    s_mov_b32 s7, 0
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX9-NEXT:    s_mov_b64 s[2:3], 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GFX9-NEXT:    s_sub_i32 s4, 0, s5
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX9-NEXT:    s_sub_i32 s4, 0, s6
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
-; GFX9-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX9-NEXT:    s_mul_i32 s4, s4, s6
-; GFX9-NEXT:    s_mul_hi_u32 s4, s6, s4
-; GFX9-NEXT:    s_add_i32 s4, s6, s4
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX9-NEXT:    s_mul_i32 s4, s4, s5
+; GFX9-NEXT:    s_mul_hi_u32 s4, s5, s4
+; GFX9-NEXT:    s_add_i32 s8, s5, s4
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9-NEXT:  .LBB0_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    s_not_b32 s10, s7
-; GFX9-NEXT:    s_mul_i32 s9, s5, s7
-; GFX9-NEXT:    s_mul_i32 s10, s5, s10
-; GFX9-NEXT:    s_add_i32 s11, s7, 1
-; GFX9-NEXT:    s_sub_i32 s9, s8, s9
-; GFX9-NEXT:    s_add_i32 s10, s8, s10
-; GFX9-NEXT:    s_cmp_ge_u32 s9, s5
-; GFX9-NEXT:    s_cselect_b32 s11, s11, s7
+; GFX9-NEXT:    s_not_b32 s10, s5
+; GFX9-NEXT:    s_mul_i32 s9, s6, s5
+; GFX9-NEXT:    s_mul_i32 s10, s6, s10
+; GFX9-NEXT:    s_add_i32 s11, s5, 1
+; GFX9-NEXT:    s_sub_i32 s9, s7, s9
+; GFX9-NEXT:    s_add_i32 s10, s7, s10
+; GFX9-NEXT:    s_cmp_ge_u32 s9, s6
+; GFX9-NEXT:    s_cselect_b32 s11, s11, s5
 ; GFX9-NEXT:    s_cselect_b32 s9, s10, s9
 ; GFX9-NEXT:    s_add_i32 s10, s11, 1
-; GFX9-NEXT:    s_cmp_ge_u32 s9, s5
+; GFX9-NEXT:    s_cmp_ge_u32 s9, s6
 ; GFX9-NEXT:    s_cselect_b32 s9, s10, s11
 ; GFX9-NEXT:    s_add_u32 s10, s0, s2
 ; GFX9-NEXT:    s_addc_u32 s11, s1, s3
-; GFX9-NEXT:    s_add_i32 s8, s8, 1
-; GFX9-NEXT:    s_add_u32 s6, s6, s4
-; GFX9-NEXT:    s_addc_u32 s7, s7, 0
+; GFX9-NEXT:    s_add_i32 s7, s7, 1
+; GFX9-NEXT:    s_add_u32 s4, s4, s8
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
 ; GFX9-NEXT:    s_add_u32 s2, s2, 4
 ; GFX9-NEXT:    s_addc_u32 s3, s3, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s9
@@ -52,43 +52,43 @@ define amdgpu_kernel void @udiv32_invariant_denom(ptr addrspace(1) nocapture %ar
 ;
 ; GFX10-LABEL: udiv32_invariant_denom:
 ; GFX10:       ; %bb.0: ; %bb
-; GFX10-NEXT:    s_load_dword s5, s[0:1], 0x2c
-; GFX10-NEXT:    s_mov_b32 s8, 0
+; GFX10-NEXT:    s_load_dword s6, s[0:1], 0x2c
+; GFX10-NEXT:    s_mov_b32 s7, 0
 ; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GFX10-NEXT:    s_sub_i32 s2, 0, s5
+; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX10-NEXT:    s_sub_i32 s2, 0, s6
 ; GFX10-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX10-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX10-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-NEXT:    s_mul_i32 s2, s2, s4
-; GFX10-NEXT:    s_mul_hi_u32 s6, s4, s2
+; GFX10-NEXT:    s_mul_hi_u32 s5, s4, s2
 ; GFX10-NEXT:    s_mov_b64 s[2:3], 0
-; GFX10-NEXT:    s_add_i32 s4, s4, s6
-; GFX10-NEXT:    s_mov_b64 s[6:7], 0
+; GFX10-NEXT:    s_add_i32 s8, s4, s5
+; GFX10-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX10-NEXT:  .LBB0_1: ; %bb3
 ; GFX10-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_not_b32 s10, s7
-; GFX10-NEXT:    s_mul_i32 s9, s5, s7
-; GFX10-NEXT:    s_mul_i32 s10, s5, s10
-; GFX10-NEXT:    s_sub_i32 s9, s8, s9
-; GFX10-NEXT:    s_add_i32 s11, s7, 1
-; GFX10-NEXT:    s_add_i32 s10, s8, s10
-; GFX10-NEXT:    s_cmp_ge_u32 s9, s5
-; GFX10-NEXT:    s_cselect_b32 s11, s11, s7
+; GFX10-NEXT:    s_not_b32 s10, s5
+; GFX10-NEXT:    s_mul_i32 s9, s6, s5
+; GFX10-NEXT:    s_mul_i32 s10, s6, s10
+; GFX10-NEXT:    s_sub_i32 s9, s7, s9
+; GFX10-NEXT:    s_add_i32 s11, s5, 1
+; GFX10-NEXT:    s_add_i32 s10, s7, s10
+; GFX10-NEXT:    s_cmp_ge_u32 s9, s6
+; GFX10-NEXT:    s_cselect_b32 s11, s11, s5
 ; GFX10-NEXT:    s_cselect_b32 s9, s10, s9
 ; GFX10-NEXT:    s_add_i32 s10, s11, 1
-; GFX10-NEXT:    s_cmp_ge_u32 s9, s5
+; GFX10-NEXT:    s_cmp_ge_u32 s9, s6
 ; GFX10-NEXT:    s_cselect_b32 s9, s10, s11
 ; GFX10-NEXT:    s_add_u32 s10, s0, s2
 ; GFX10-NEXT:    s_addc_u32 s11, s1, s3
-; GFX10-NEXT:    s_add_i32 s8, s8, 1
-; GFX10-NEXT:    s_add_u32 s6, s6, s4
+; GFX10-NEXT:    s_add_i32 s7, s7, 1
+; GFX10-NEXT:    s_add_u32 s4, s4, s8
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s9
-; GFX10-NEXT:    s_addc_u32 s7, s7, 0
+; GFX10-NEXT:    s_addc_u32 s5, s5, 0
 ; GFX10-NEXT:    s_add_u32 s2, s2, 4
 ; GFX10-NEXT:    s_addc_u32 s3, s3, 0
 ; GFX10-NEXT:    s_cmpk_eq_i32 s2, 0x1000
@@ -100,12 +100,12 @@ define amdgpu_kernel void @udiv32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX11-LABEL: udiv32_invariant_denom:
 ; GFX11:       ; %bb.0: ; %bb
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    s_load_b32 s5, s[0:1], 0x2c
+; GFX11-NEXT:    s_load_b32 s6, s[0:1], 0x2c
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    s_mov_b32 s8, 0
+; GFX11-NEXT:    s_mov_b32 s7, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GFX11-NEXT:    s_sub_i32 s2, 0, s5
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX11-NEXT:    s_sub_i32 s2, 0, s6
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
@@ -116,32 +116,32 @@ define amdgpu_kernel void @udiv32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:    s_mul_i32 s2, s2, s4
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_mul_hi_u32 s6, s4, s2
+; GFX11-NEXT:    s_mul_hi_u32 s5, s4, s2
 ; GFX11-NEXT:    s_mov_b64 s[2:3], 0
-; GFX11-NEXT:    s_add_i32 s4, s4, s6
-; GFX11-NEXT:    s_mov_b64 s[6:7], 0
+; GFX11-NEXT:    s_add_i32 s8, s4, s5
+; GFX11-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX11-NEXT:    .p2align 6
 ; GFX11-NEXT:  .LBB0_1: ; %bb3
 ; GFX11-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_not_b32 s10, s7
-; GFX11-NEXT:    s_mul_i32 s9, s5, s7
-; GFX11-NEXT:    s_mul_i32 s10, s5, s10
-; GFX11-NEXT:    s_sub_i32 s9, s8, s9
-; GFX11-NEXT:    s_add_i32 s11, s7, 1
-; GFX11-NEXT:    s_add_i32 s10, s8, s10
-; GFX11-NEXT:    s_cmp_ge_u32 s9, s5
-; GFX11-NEXT:    s_cselect_b32 s11, s11, s7
+; GFX11-NEXT:    s_not_b32 s10, s5
+; GFX11-NEXT:    s_mul_i32 s9, s6, s5
+; GFX11-NEXT:    s_mul_i32 s10, s6, s10
+; GFX11-NEXT:    s_sub_i32 s9, s7, s9
+; GFX11-NEXT:    s_add_i32 s11, s5, 1
+; GFX11-NEXT:    s_add_i32 s10, s7, s10
+; GFX11-NEXT:    s_cmp_ge_u32 s9, s6
+; GFX11-NEXT:    s_cselect_b32 s11, s11, s5
 ; GFX11-NEXT:    s_cselect_b32 s9, s10, s9
 ; GFX11-NEXT:    s_add_i32 s10, s11, 1
-; GFX11-NEXT:    s_cmp_ge_u32 s9, s5
+; GFX11-NEXT:    s_cmp_ge_u32 s9, s6
 ; GFX11-NEXT:    s_cselect_b32 s9, s10, s11
 ; GFX11-NEXT:    s_add_u32 s10, s0, s2
 ; GFX11-NEXT:    s_addc_u32 s11, s1, s3
-; GFX11-NEXT:    s_add_i32 s8, s8, 1
-; GFX11-NEXT:    s_add_u32 s6, s6, s4
+; GFX11-NEXT:    s_add_i32 s7, s7, 1
+; GFX11-NEXT:    s_add_u32 s4, s4, s8
 ; GFX11-NEXT:    v_mov_b32_e32 v1, s9
-; GFX11-NEXT:    s_addc_u32 s7, s7, 0
+; GFX11-NEXT:    s_addc_u32 s5, s5, 0
 ; GFX11-NEXT:    s_add_u32 s2, s2, 4
 ; GFX11-NEXT:    s_addc_u32 s3, s3, 0
 ; GFX11-NEXT:    s_cmpk_eq_i32 s2, 0x1000
@@ -171,39 +171,39 @@ bb3:                                              ; preds = %bb3, %bb
 define amdgpu_kernel void @urem32_invariant_denom(ptr addrspace(1) nocapture %arg, i32 %arg1) {
 ; GFX9-LABEL: urem32_invariant_denom:
 ; GFX9:       ; %bb.0: ; %bb
-; GFX9-NEXT:    s_load_dword s5, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b32 s8, 0
+; GFX9-NEXT:    s_load_dword s6, s[0:1], 0x2c
+; GFX9-NEXT:    s_mov_b32 s7, 0
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX9-NEXT:    s_mov_b64 s[2:3], 0
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GFX9-NEXT:    s_sub_i32 s4, 0, s5
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX9-NEXT:    s_sub_i32 s4, 0, s6
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v1, v0
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
-; GFX9-NEXT:    v_readfirstlane_b32 s6, v1
-; GFX9-NEXT:    s_mul_i32 s4, s4, s6
-; GFX9-NEXT:    s_mul_hi_u32 s4, s6, s4
-; GFX9-NEXT:    s_add_i32 s4, s6, s4
-; GFX9-NEXT:    s_mov_b64 s[6:7], 0
+; GFX9-NEXT:    v_readfirstlane_b32 s5, v1
+; GFX9-NEXT:    s_mul_i32 s4, s4, s5
+; GFX9-NEXT:    s_mul_hi_u32 s4, s5, s4
+; GFX9-NEXT:    s_add_i32 s8, s5, s4
+; GFX9-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX9-NEXT:  .LBB1_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    s_not_b32 s10, s7
-; GFX9-NEXT:    s_mul_i32 s9, s5, s7
-; GFX9-NEXT:    s_mul_i32 s10, s5, s10
-; GFX9-NEXT:    s_sub_i32 s9, s8, s9
-; GFX9-NEXT:    s_add_i32 s10, s8, s10
-; GFX9-NEXT:    s_cmp_ge_u32 s9, s5
+; GFX9-NEXT:    s_not_b32 s10, s5
+; GFX9-NEXT:    s_mul_i32 s9, s6, s5
+; GFX9-NEXT:    s_mul_i32 s10, s6, s10
+; GFX9-NEXT:    s_sub_i32 s9, s7, s9
+; GFX9-NEXT:    s_add_i32 s10, s7, s10
+; GFX9-NEXT:    s_cmp_ge_u32 s9, s6
 ; GFX9-NEXT:    s_cselect_b32 s9, s10, s9
-; GFX9-NEXT:    s_sub_i32 s10, s9, s5
-; GFX9-NEXT:    s_cmp_ge_u32 s9, s5
+; GFX9-NEXT:    s_sub_i32 s10, s9, s6
+; GFX9-NEXT:    s_cmp_ge_u32 s9, s6
 ; GFX9-NEXT:    s_cselect_b32 s9, s10, s9
 ; GFX9-NEXT:    s_add_u32 s10, s0, s2
 ; GFX9-NEXT:    s_addc_u32 s11, s1, s3
-; GFX9-NEXT:    s_add_i32 s8, s8, 1
-; GFX9-NEXT:    s_add_u32 s6, s6, s4
-; GFX9-NEXT:    s_addc_u32 s7, s7, 0
+; GFX9-NEXT:    s_add_i32 s7, s7, 1
+; GFX9-NEXT:    s_add_u32 s4, s4, s8
+; GFX9-NEXT:    s_addc_u32 s5, s5, 0
 ; GFX9-NEXT:    s_add_u32 s2, s2, 4
 ; GFX9-NEXT:    s_addc_u32 s3, s3, 0
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s9
@@ -215,41 +215,41 @@ define amdgpu_kernel void @urem32_invariant_denom(ptr addrspace(1) nocapture %ar
 ;
 ; GFX10-LABEL: urem32_invariant_denom:
 ; GFX10:       ; %bb.0: ; %bb
-; GFX10-NEXT:    s_load_dword s5, s[0:1], 0x2c
-; GFX10-NEXT:    s_mov_b32 s8, 0
+; GFX10-NEXT:    s_load_dword s6, s[0:1], 0x2c
+; GFX10-NEXT:    s_mov_b32 s7, 0
 ; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GFX10-NEXT:    s_sub_i32 s2, 0, s5
+; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX10-NEXT:    s_sub_i32 s2, 0, s6
 ; GFX10-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX10-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX10-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX10-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX10-NEXT:    s_mul_i32 s2, s2, s4
-; GFX10-NEXT:    s_mul_hi_u32 s6, s4, s2
+; GFX10-NEXT:    s_mul_hi_u32 s5, s4, s2
 ; GFX10-NEXT:    s_mov_b64 s[2:3], 0
-; GFX10-NEXT:    s_add_i32 s4, s4, s6
-; GFX10-NEXT:    s_mov_b64 s[6:7], 0
+; GFX10-NEXT:    s_add_i32 s8, s4, s5
+; GFX10-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX10-NEXT:  .LBB1_1: ; %bb3
 ; GFX10-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT:    s_not_b32 s9, s7
+; GFX10-NEXT:    s_not_b32 s9, s5
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
-; GFX10-NEXT:    s_mul_i32 s10, s5, s7
-; GFX10-NEXT:    s_mul_i32 s9, s5, s9
-; GFX10-NEXT:    s_sub_i32 s10, s8, s10
-; GFX10-NEXT:    s_add_i32 s9, s8, s9
-; GFX10-NEXT:    s_cmp_ge_u32 s10, s5
+; GFX10-NEXT:    s_mul_i32 s10, s6, s5
+; GFX10-NEXT:    s_mul_i32 s9, s6, s9
+; GFX10-NEXT:    s_sub_i32 s10, s7, s10
+; GFX10-NEXT:    s_add_i32 s9, s7, s9
+; GFX10-NEXT:    s_cmp_ge_u32 s10, s6
 ; GFX10-NEXT:    s_cselect_b32 s9, s9, s10
-; GFX10-NEXT:    s_sub_i32 s10, s9, s5
-; GFX10-NEXT:    s_cmp_ge_u32 s9, s5
+; GFX10-NEXT:    s_sub_i32 s10, s9, s6
+; GFX10-NEXT:    s_cmp_ge_u32 s9, s6
 ; GFX10-NEXT:    s_cselect_b32 s9, s10, s9
 ; GFX10-NEXT:    s_add_u32 s10, s0, s2
 ; GFX10-NEXT:    s_addc_u32 s11, s1, s3
-; GFX10-NEXT:    s_add_i32 s8, s8, 1
-; GFX10-NEXT:    s_add_u32 s6, s6, s4
+; GFX10-NEXT:    s_add_i32 s7, s7, 1
+; GFX10-NEXT:    s_add_u32 s4, s4, s8
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s9
-; GFX10-NEXT:    s_addc_u32 s7, s7, 0
+; GFX10-NEXT:    s_addc_u32 s5, s5, 0
 ; GFX10-NEXT:    s_add_u32 s2, s2, 4
 ; GFX10-NEXT:    s_addc_u32 s3, s3, 0
 ; GFX10-NEXT:    s_cmpk_eq_i32 s2, 0x1000
@@ -261,12 +261,12 @@ define amdgpu_kernel void @urem32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX11-LABEL: urem32_invariant_denom:
 ; GFX11:       ; %bb.0: ; %bb
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    s_load_b32 s5, s[0:1], 0x2c
+; GFX11-NEXT:    s_load_b32 s6, s[0:1], 0x2c
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
-; GFX11-NEXT:    s_mov_b32 s8, 0
+; GFX11-NEXT:    s_mov_b32 s7, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s5
-; GFX11-NEXT:    s_sub_i32 s2, 0, s5
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s6
+; GFX11-NEXT:    s_sub_i32 s2, 0, s6
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
@@ -277,31 +277,31 @@ define amdgpu_kernel void @urem32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:    s_mul_i32 s2, s2, s4
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_mul_hi_u32 s6, s4, s2
+; GFX11-NEXT:    s_mul_hi_u32 s5, s4, s2
 ; GFX11-NEXT:    s_mov_b64 s[2:3], 0
-; GFX11-NEXT:    s_add_i32 s4, s4, s6
-; GFX11-NEXT:    s_mov_b64 s[6:7], 0
+; GFX11-NEXT:    s_add_i32 s8, s4, s5
+; GFX11-NEXT:    s_mov_b64 s[4:5], 0
 ; GFX11-NEXT:    .p2align 6
 ; GFX11-NEXT:  .LBB1_1: ; %bb3
 ; GFX11-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_not_b32 s9, s7
-; GFX11-NEXT:    s_mul_i32 s10, s5, s7
-; GFX11-NEXT:    s_mul_i32 s9, s5, s9
-; GFX11-NEXT:    s_sub_i32 s10, s8, s10
-; GFX11-NEXT:    s_add_i32 s9, s8, s9
-; GFX11-NEXT:    s_cmp_ge_u32 s10, s5
+; GFX11-NEXT:    s_not_b32 s9, s5
+; GFX11-NEXT:    s_mul_i32 s10, s6, s5
+; GFX11-NEXT:    s_mul_i32 s9, s6, s9
+; GFX11-NEXT:    s_sub_i32 s10, s7, s10
+; GFX11-NEXT:    s_add_i32 s9, s7, s9
+; GFX11-NEXT:    s_cmp_ge_u32 s10, s6
 ; GFX11-NEXT:    s_cselect_b32 s9, s9, s10
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_sub_i32 s10, s9, s5
-; GFX11-NEXT:    s_cmp_ge_u32 s9, s5
+; GFX11-NEXT:    s_sub_i32 s10, s9, s6
+; GFX11-NEXT:    s_cmp_ge_u32 s9, s6
 ; GFX11-NEXT:    s_cselect_b32 s9, s10, s9
 ; GFX11-NEXT:    s_add_u32 s10, s0, s2
 ; GFX11-NEXT:    s_addc_u32 s11, s1, s3
-; GFX11-NEXT:    s_add_i32 s8, s8, 1
-; GFX11-NEXT:    s_add_u32 s6, s6, s4
+; GFX11-NEXT:    s_add_i32 s7, s7, 1
+; GFX11-NEXT:    s_add_u32 s4, s4, s8
 ; GFX11-NEXT:    v_mov_b32_e32 v1, s9
-; GFX11-NEXT:    s_addc_u32 s7, s7, 0
+; GFX11-NEXT:    s_addc_u32 s5, s5, 0
 ; GFX11-NEXT:    s_add_u32 s2, s2, 4
 ; GFX11-NEXT:    s_addc_u32 s3, s3, 0
 ; GFX11-NEXT:    s_cmpk_eq_i32 s2, 0x1000
@@ -331,90 +331,90 @@ bb3:                                              ; preds = %bb3, %bb
 define amdgpu_kernel void @sdiv32_invariant_denom(ptr addrspace(1) nocapture %arg, i32 %arg1) {
 ; GFX9-LABEL: sdiv32_invariant_denom:
 ; GFX9:       ; %bb.0: ; %bb
-; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b32 s5, 0
+; GFX9-NEXT:    s_load_dword s3, s[0:1], 0x2c
+; GFX9-NEXT:    s_mov_b32 s4, 0
 ; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-NEXT:    s_ashr_i32 s3, s2, 31
-; GFX9-NEXT:    s_add_i32 s2, s2, s3
-; GFX9-NEXT:    s_xor_b32 s4, s2, s3
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX9-NEXT:    s_sub_i32 s2, 0, s4
+; GFX9-NEXT:    s_ashr_i32 s2, s3, 31
+; GFX9-NEXT:    s_add_i32 s3, s3, s2
+; GFX9-NEXT:    s_xor_b32 s3, s3, s2
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX9-NEXT:    s_sub_i32 s5, 0, s3
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX9-NEXT:    v_readfirstlane_b32 s6, v0
-; GFX9-NEXT:    s_mul_i32 s2, s2, s6
-; GFX9-NEXT:    s_mul_hi_u32 s2, s6, s2
-; GFX9-NEXT:    s_add_i32 s2, s6, s2
+; GFX9-NEXT:    s_mul_i32 s5, s5, s6
+; GFX9-NEXT:    s_mul_hi_u32 s5, s6, s5
+; GFX9-NEXT:    s_add_i32 s5, s6, s5
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:  .LBB2_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    s_mul_hi_u32 s6, s5, s2
-; GFX9-NEXT:    s_mul_i32 s7, s6, s4
-; GFX9-NEXT:    s_sub_i32 s7, s5, s7
+; GFX9-NEXT:    s_mul_hi_u32 s6, s4, s5
+; GFX9-NEXT:    s_mul_i32 s7, s6, s3
+; GFX9-NEXT:    s_sub_i32 s7, s4, s7
 ; GFX9-NEXT:    s_add_i32 s8, s6, 1
-; GFX9-NEXT:    s_sub_i32 s9, s7, s4
-; GFX9-NEXT:    s_cmp_ge_u32 s7, s4
+; GFX9-NEXT:    s_sub_i32 s9, s7, s3
+; GFX9-NEXT:    s_cmp_ge_u32 s7, s3
 ; GFX9-NEXT:    s_cselect_b32 s6, s8, s6
 ; GFX9-NEXT:    s_cselect_b32 s7, s9, s7
 ; GFX9-NEXT:    s_add_i32 s8, s6, 1
-; GFX9-NEXT:    s_cmp_ge_u32 s7, s4
+; GFX9-NEXT:    s_cmp_ge_u32 s7, s3
 ; GFX9-NEXT:    s_cselect_b32 s6, s8, s6
-; GFX9-NEXT:    s_xor_b32 s6, s6, s3
-; GFX9-NEXT:    s_sub_i32 s6, s6, s3
-; GFX9-NEXT:    s_add_i32 s5, s5, 1
+; GFX9-NEXT:    s_xor_b32 s6, s6, s2
+; GFX9-NEXT:    s_sub_i32 s6, s6, s2
+; GFX9-NEXT:    s_add_i32 s4, s4, 1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX9-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX9-NEXT:    s_add_u32 s0, s0, 4
 ; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_cmpk_eq_i32 s5, 0x400
+; GFX9-NEXT:    s_cmpk_eq_i32 s4, 0x400
 ; GFX9-NEXT:    s_cbranch_scc0 .LBB2_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: sdiv32_invariant_denom:
 ; GFX10:       ; %bb.0: ; %bb
-; GFX10-NEXT:    s_load_dword s2, s[0:1], 0x2c
+; GFX10-NEXT:    s_load_dword s3, s[0:1], 0x2c
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX10-NEXT:    s_ashr_i32 s3, s2, 31
+; GFX10-NEXT:    s_ashr_i32 s2, s3, 31
 ; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
-; GFX10-NEXT:    s_add_i32 s2, s2, s3
-; GFX10-NEXT:    s_xor_b32 s4, s2, s3
-; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX10-NEXT:    s_sub_i32 s5, 0, s4
+; GFX10-NEXT:    s_add_i32 s3, s3, s2
+; GFX10-NEXT:    s_xor_b32 s3, s3, s2
+; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX10-NEXT:    s_sub_i32 s4, 0, s3
 ; GFX10-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX10-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX10-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s5, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    s_mul_i32 s5, s5, s2
-; GFX10-NEXT:    s_mul_hi_u32 s6, s2, s5
-; GFX10-NEXT:    s_mov_b32 s5, 0
-; GFX10-NEXT:    s_add_i32 s2, s2, s6
+; GFX10-NEXT:    s_mul_i32 s4, s4, s5
+; GFX10-NEXT:    s_mul_hi_u32 s6, s5, s4
+; GFX10-NEXT:    s_mov_b32 s4, 0
+; GFX10-NEXT:    s_add_i32 s5, s5, s6
 ; GFX10-NEXT:  .LBB2_1: ; %bb3
 ; GFX10-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT:    s_mul_hi_u32 s6, s5, s2
-; GFX10-NEXT:    s_mul_i32 s7, s6, s4
+; GFX10-NEXT:    s_mul_hi_u32 s6, s4, s5
+; GFX10-NEXT:    s_mul_i32 s7, s6, s3
 ; GFX10-NEXT:    s_add_i32 s8, s6, 1
-; GFX10-NEXT:    s_sub_i32 s7, s5, s7
-; GFX10-NEXT:    s_sub_i32 s9, s7, s4
-; GFX10-NEXT:    s_cmp_ge_u32 s7, s4
+; GFX10-NEXT:    s_sub_i32 s7, s4, s7
+; GFX10-NEXT:    s_sub_i32 s9, s7, s3
+; GFX10-NEXT:    s_cmp_ge_u32 s7, s3
 ; GFX10-NEXT:    s_cselect_b32 s6, s8, s6
 ; GFX10-NEXT:    s_cselect_b32 s7, s9, s7
 ; GFX10-NEXT:    s_add_i32 s8, s6, 1
-; GFX10-NEXT:    s_cmp_ge_u32 s7, s4
+; GFX10-NEXT:    s_cmp_ge_u32 s7, s3
 ; GFX10-NEXT:    s_cselect_b32 s6, s8, s6
-; GFX10-NEXT:    s_add_i32 s5, s5, 1
-; GFX10-NEXT:    s_xor_b32 s6, s6, s3
-; GFX10-NEXT:    s_sub_i32 s6, s6, s3
+; GFX10-NEXT:    s_add_i32 s4, s4, 1
+; GFX10-NEXT:    s_xor_b32 s6, s6, s2
+; GFX10-NEXT:    s_sub_i32 s6, s6, s2
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_add_u32 s0, s0, 4
 ; GFX10-NEXT:    s_addc_u32 s1, s1, 0
-; GFX10-NEXT:    s_cmpk_eq_i32 s5, 0x400
+; GFX10-NEXT:    s_cmpk_eq_i32 s4, 0x400
 ; GFX10-NEXT:    s_cbranch_scc0 .LBB2_1
 ; GFX10-NEXT:  ; %bb.2: ; %bb2
 ; GFX10-NEXT:    s_endpgm
@@ -422,53 +422,53 @@ define amdgpu_kernel void @sdiv32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX11-LABEL: sdiv32_invariant_denom:
 ; GFX11:       ; %bb.0: ; %bb
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    s_load_b32 s2, s[0:1], 0x2c
+; GFX11-NEXT:    s_load_b32 s3, s[0:1], 0x2c
 ; GFX11-NEXT:    s_load_b64 s[0:1], s[0:1], 0x24
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX11-NEXT:    s_ashr_i32 s3, s2, 31
+; GFX11-NEXT:    s_ashr_i32 s2, s3, 31
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_add_i32 s2, s2, s3
-; GFX11-NEXT:    s_xor_b32 s4, s2, s3
+; GFX11-NEXT:    s_add_i32 s3, s3, s2
+; GFX11-NEXT:    s_xor_b32 s3, s3, s2
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s4
-; GFX11-NEXT:    s_sub_i32 s5, 0, s4
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s3
+; GFX11-NEXT:    s_sub_i32 s4, 0, s3
 ; GFX11-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
 ; GFX11-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX11-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s5, v0
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_mul_i32 s5, s5, s2
-; GFX11-NEXT:    s_mul_hi_u32 s6, s2, s5
-; GFX11-NEXT:    s_mov_b32 s5, 0
-; GFX11-NEXT:    s_add_i32 s2, s2, s6
+; GFX11-NEXT:    s_mul_i32 s4, s4, s5
+; GFX11-NEXT:    s_mul_hi_u32 s6, s5, s4
+; GFX11-NEXT:    s_mov_b32 s4, 0
+; GFX11-NEXT:    s_add_i32 s5, s5, s6
 ; GFX11-NEXT:    .p2align 6
 ; GFX11-NEXT:  .LBB2_1: ; %bb3
 ; GFX11-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_mul_hi_u32 s6, s5, s2
-; GFX11-NEXT:    s_mul_i32 s7, s6, s4
+; GFX11-NEXT:    s_mul_hi_u32 s6, s4, s5
+; GFX11-NEXT:    s_mul_i32 s7, s6, s3
 ; GFX11-NEXT:    s_add_i32 s8, s6, 1
-; GFX11-NEXT:    s_sub_i32 s7, s5, s7
+; GFX11-NEXT:    s_sub_i32 s7, s4, s7
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_sub_i32 s9, s7, s4
-; GFX11-NEXT:    s_cmp_ge_u32 s7, s4
+; GFX11-NEXT:    s_sub_i32 s9, s7, s3
+; GFX11-NEXT:    s_cmp_ge_u32 s7, s3
 ; GFX11-NEXT:    s_cselect_b32 s6, s8, s6
 ; GFX11-NEXT:    s_cselect_b32 s7, s9, s7
 ; GFX11-NEXT:    s_add_i32 s8, s6, 1
-; GFX11-NEXT:    s_cmp_ge_u32 s7, s4
+; GFX11-NEXT:    s_cmp_ge_u32 s7, s3
 ; GFX11-NEXT:    s_cselect_b32 s6, s8, s6
-; GFX11-NEXT:    s_add_i32 s5, s5, 1
-; GFX11-NEXT:    s_xor_b32 s6, s6, s3
+; GFX11-NEXT:    s_add_i32 s4, s4, 1
+; GFX11-NEXT:    s_xor_b32 s6, s6, s2
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_sub_i32 s6, s6, s3
+; GFX11-NEXT:    s_sub_i32 s6, s6, s2
 ; GFX11-NEXT:    v_mov_b32_e32 v1, s6
 ; GFX11-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX11-NEXT:    s_add_u32 s0, s0, 4
 ; GFX11-NEXT:    s_addc_u32 s1, s1, 0
-; GFX11-NEXT:    s_cmpk_eq_i32 s5, 0x400
+; GFX11-NEXT:    s_cmpk_eq_i32 s4, 0x400
 ; GFX11-NEXT:    s_cbranch_scc0 .LBB2_1
 ; GFX11-NEXT:  ; %bb.2: ; %bb2
 ; GFX11-NEXT:    s_nop 0
@@ -495,39 +495,40 @@ define amdgpu_kernel void @srem32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX9-LABEL: srem32_invariant_denom:
 ; GFX9:       ; %bb.0: ; %bb
 ; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
-; GFX9-NEXT:    s_mov_b32 s4, 0
-; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    s_ashr_i32 s3, s2, 31
 ; GFX9-NEXT:    s_add_i32 s2, s2, s3
-; GFX9-NEXT:    s_xor_b32 s3, s2, s3
-; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s3
-; GFX9-NEXT:    s_sub_i32 s2, 0, s3
+; GFX9-NEXT:    s_xor_b32 s2, s2, s3
+; GFX9-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GFX9-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT:    s_sub_i32 s4, 0, s2
+; GFX9-NEXT:    s_mov_b32 s3, 0
 ; GFX9-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX9-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX9-NEXT:    v_cvt_u32_f32_e32 v0, v0
 ; GFX9-NEXT:    v_readfirstlane_b32 s5, v0
-; GFX9-NEXT:    s_mul_i32 s2, s2, s5
-; GFX9-NEXT:    s_mul_hi_u32 s2, s5, s2
-; GFX9-NEXT:    s_add_i32 s2, s5, s2
+; GFX9-NEXT:    s_mul_i32 s4, s4, s5
+; GFX9-NEXT:    s_mul_hi_u32 s4, s5, s4
+; GFX9-NEXT:    s_add_i32 s4, s5, s4
 ; GFX9-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX9-NEXT:  .LBB3_1: ; %bb3
 ; GFX9-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX9-NEXT:    s_mul_hi_u32 s5, s4, s2
-; GFX9-NEXT:    s_mul_i32 s5, s5, s3
-; GFX9-NEXT:    s_sub_i32 s5, s4, s5
-; GFX9-NEXT:    s_sub_i32 s6, s5, s3
-; GFX9-NEXT:    s_cmp_ge_u32 s5, s3
+; GFX9-NEXT:    s_mul_hi_u32 s5, s3, s4
+; GFX9-NEXT:    s_mul_i32 s5, s5, s2
+; GFX9-NEXT:    s_sub_i32 s5, s3, s5
+; GFX9-NEXT:    s_sub_i32 s6, s5, s2
+; GFX9-NEXT:    s_cmp_ge_u32 s5, s2
 ; GFX9-NEXT:    s_cselect_b32 s5, s6, s5
-; GFX9-NEXT:    s_sub_i32 s6, s5, s3
-; GFX9-NEXT:    s_cmp_ge_u32 s5, s3
+; GFX9-NEXT:    s_sub_i32 s6, s5, s2
+; GFX9-NEXT:    s_cmp_ge_u32 s5, s2
 ; GFX9-NEXT:    s_cselect_b32 s5, s6, s5
-; GFX9-NEXT:    s_add_i32 s4, s4, 1
+; GFX9-NEXT:    s_add_i32 s3, s3, 1
 ; GFX9-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX9-NEXT:    s_add_u32 s0, s0, 4
 ; GFX9-NEXT:    s_addc_u32 s1, s1, 0
-; GFX9-NEXT:    s_cmpk_eq_i32 s4, 0x400
+; GFX9-NEXT:    s_cmpk_eq_i32 s3, 0x400
 ; GFX9-NEXT:    s_cbranch_scc0 .LBB3_1
 ; GFX9-NEXT:  ; %bb.2: ; %bb2
 ; GFX9-NEXT:    s_endpgm
@@ -539,37 +540,37 @@ define amdgpu_kernel void @srem32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX10-NEXT:    s_ashr_i32 s3, s2, 31
 ; GFX10-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x24
 ; GFX10-NEXT:    s_add_i32 s2, s2, s3
-; GFX10-NEXT:    s_xor_b32 s3, s2, s3
-; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s3
-; GFX10-NEXT:    s_sub_i32 s4, 0, s3
+; GFX10-NEXT:    s_xor_b32 s2, s2, s3
+; GFX10-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GFX10-NEXT:    s_sub_i32 s3, 0, s2
 ; GFX10-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX10-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX10-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX10-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX10-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v0, 0
-; GFX10-NEXT:    s_mul_i32 s4, s4, s2
-; GFX10-NEXT:    s_mul_hi_u32 s5, s2, s4
-; GFX10-NEXT:    s_mov_b32 s4, 0
-; GFX10-NEXT:    s_add_i32 s2, s2, s5
+; GFX10-NEXT:    s_mul_i32 s3, s3, s4
+; GFX10-NEXT:    s_mul_hi_u32 s5, s4, s3
+; GFX10-NEXT:    s_mov_b32 s3, 0
+; GFX10-NEXT:    s_add_i32 s4, s4, s5
 ; GFX10-NEXT:  .LBB3_1: ; %bb3
 ; GFX10-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GFX10-NEXT:    s_mul_hi_u32 s5, s4, s2
-; GFX10-NEXT:    s_mul_i32 s5, s5, s3
-; GFX10-NEXT:    s_sub_i32 s5, s4, s5
-; GFX10-NEXT:    s_sub_i32 s6, s5, s3
-; GFX10-NEXT:    s_cmp_ge_u32 s5, s3
+; GFX10-NEXT:    s_mul_hi_u32 s5, s3, s4
+; GFX10-NEXT:    s_mul_i32 s5, s5, s2
+; GFX10-NEXT:    s_sub_i32 s5, s3, s5
+; GFX10-NEXT:    s_sub_i32 s6, s5, s2
+; GFX10-NEXT:    s_cmp_ge_u32 s5, s2
 ; GFX10-NEXT:    s_cselect_b32 s5, s6, s5
-; GFX10-NEXT:    s_sub_i32 s6, s5, s3
-; GFX10-NEXT:    s_cmp_ge_u32 s5, s3
+; GFX10-NEXT:    s_sub_i32 s6, s5, s2
+; GFX10-NEXT:    s_cmp_ge_u32 s5, s2
 ; GFX10-NEXT:    s_cselect_b32 s5, s6, s5
-; GFX10-NEXT:    s_add_i32 s4, s4, 1
+; GFX10-NEXT:    s_add_i32 s3, s3, 1
 ; GFX10-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    global_store_dword v0, v1, s[0:1]
 ; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
 ; GFX10-NEXT:    s_add_u32 s0, s0, 4
 ; GFX10-NEXT:    s_addc_u32 s1, s1, 0
-; GFX10-NEXT:    s_cmpk_eq_i32 s4, 0x400
+; GFX10-NEXT:    s_cmpk_eq_i32 s3, 0x400
 ; GFX10-NEXT:    s_cbranch_scc0 .LBB3_1
 ; GFX10-NEXT:  ; %bb.2: ; %bb2
 ; GFX10-NEXT:    s_endpgm
@@ -583,43 +584,43 @@ define amdgpu_kernel void @srem32_invariant_denom(ptr addrspace(1) nocapture %ar
 ; GFX11-NEXT:    s_ashr_i32 s3, s2, 31
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
 ; GFX11-NEXT:    s_add_i32 s2, s2, s3
-; GFX11-NEXT:    s_xor_b32 s3, s2, s3
+; GFX11-NEXT:    s_xor_b32 s2, s2, s3
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s3
-; GFX11-NEXT:    s_sub_i32 s4, 0, s3
+; GFX11-NEXT:    v_cvt_f32_u32_e32 v0, s2
+; GFX11-NEXT:    s_sub_i32 s3, 0, s2
 ; GFX11-NEXT:    v_rcp_iflag_f32_e32 v0, v0
 ; GFX11-NEXT:    s_waitcnt_depctr 0xfff
 ; GFX11-NEXT:    v_mul_f32_e32 v0, 0x4f7ffffe, v0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_cvt_u32_f32_e32 v0, v0
-; GFX11-NEXT:    v_readfirstlane_b32 s2, v0
+; GFX11-NEXT:    v_readfirstlane_b32 s4, v0
 ; GFX11-NEXT:    v_mov_b32_e32 v0, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_mul_i32 s4, s4, s2
-; GFX11-NEXT:    s_mul_hi_u32 s5, s2, s4
-; GFX11-NEXT:    s_mov_b32 s4, 0
-; GFX11-NEXT:    s_add_i32 s2, s2, s5
+; GFX11-NEXT:    s_mul_i32 s3, s3, s4
+; GFX11-NEXT:    s_mul_hi_u32 s5, s4, s3
+; GFX11-NEXT:    s_mov_b32 s3, 0
+; GFX11-NEXT:    s_add_i32 s4, s4, s5
 ; GFX11-NEXT:    .p2align 6
 ; GFX11-NEXT:  .LBB3_1: ; %bb3
 ; GFX11-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_mul_hi_u32 s5, s4, s2
-; GFX11-NEXT:    s_mul_i32 s5, s5, s3
+; GFX11-NEXT:    s_mul_hi_u32 s5, s3, s4
+; GFX11-NEXT:    s_mul_i32 s5, s5, s2
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT:    s_sub_i32 s5, s4, s5
-; GFX11-NEXT:    s_sub_i32 s6, s5, s3
-; GFX11-NEXT:    s_cmp_ge_u32 s5, s3
+; GFX11-NEXT:    s_sub_i32 s5, s3, s5
+; GFX11-NEXT:    s_sub_i32 s6, s5, s2
+; GFX11-NEXT:    s_cmp_ge_u32 s5, s2
 ; GFX11-NEXT:    s_cselect_b32 s5, s6, s5
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT:    s_sub_i32 s6, s5, s3
-; GFX11-NEXT:    s_cmp_ge_u32 s5, s3
+; GFX11-NEXT:    s_sub_i32 s6, s5, s2
+; GFX11-NEXT:    s_cmp_ge_u32 s5, s2
 ; GFX11-NEXT:    s_cselect_b32 s5, s6, s5
-; GFX11-NEXT:    s_add_i32 s4, s4, 1
+; GFX11-NEXT:    s_add_i32 s3, s3, 1
 ; GFX11-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX11-NEXT:    global_store_b32 v0, v1, s[0:1]
 ; GFX11-NEXT:    s_add_u32 s0, s0, 4
 ; GFX11-NEXT:    s_addc_u32 s1, s1, 0
-; GFX11-NEXT:    s_cmpk_eq_i32 s4, 0x400
+; GFX11-NEXT:    s_cmpk_eq_i32 s3, 0x400
 ; GFX11-NEXT:    s_cbranch_scc0 .LBB3_1
 ; GFX11-NEXT:  ; %bb.2: ; %bb2
 ; GFX11-NEXT:    s_nop 0

diff  --git a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
index 7abb789019f1f0..488dbe2e3189bf 100644
--- a/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
+++ b/llvm/test/CodeGen/AMDGPU/llc-pipeline.ll
@@ -334,6 +334,7 @@
 ; GCN-O1-NEXT:        Machine Natural Loop Construction
 ; GCN-O1-NEXT:        Register Coalescer
 ; GCN-O1-NEXT:        Rename Disconnected Subregister Components
+; GCN-O1-NEXT:        Rewrite Partial Register Uses
 ; GCN-O1-NEXT:        Machine Instruction Scheduler
 ; GCN-O1-NEXT:        SI Whole Quad Mode
 ; GCN-O1-NEXT:        SI optimize exec mask operations pre-RA
@@ -622,6 +623,7 @@
 ; GCN-O1-OPTS-NEXT:        Machine Natural Loop Construction
 ; GCN-O1-OPTS-NEXT:        Register Coalescer
 ; GCN-O1-OPTS-NEXT:        Rename Disconnected Subregister Components
+; GCN-O1-OPTS-NEXT:        Rewrite Partial Register Uses
 ; GCN-O1-OPTS-NEXT:        AMDGPU Pre-RA optimizations
 ; GCN-O1-OPTS-NEXT:        Machine Instruction Scheduler
 ; GCN-O1-OPTS-NEXT:        SI Whole Quad Mode
@@ -922,6 +924,7 @@
 ; GCN-O2-NEXT:        Machine Natural Loop Construction
 ; GCN-O2-NEXT:        Register Coalescer
 ; GCN-O2-NEXT:        Rename Disconnected Subregister Components
+; GCN-O2-NEXT:        Rewrite Partial Register Uses
 ; GCN-O2-NEXT:        AMDGPU Pre-RA optimizations
 ; GCN-O2-NEXT:        Machine Instruction Scheduler
 ; GCN-O2-NEXT:        SI Whole Quad Mode
@@ -1235,6 +1238,7 @@
 ; GCN-O3-NEXT:        Machine Natural Loop Construction
 ; GCN-O3-NEXT:        Register Coalescer
 ; GCN-O3-NEXT:        Rename Disconnected Subregister Components
+; GCN-O3-NEXT:        Rewrite Partial Register Uses
 ; GCN-O3-NEXT:        AMDGPU Pre-RA optimizations
 ; GCN-O3-NEXT:        Machine Instruction Scheduler
 ; GCN-O3-NEXT:        SI Whole Quad Mode

diff  --git a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
index ab527481235ca9..249acec639540b 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.mulo.ll
@@ -181,22 +181,22 @@ define { i64, i1 } @smulo_i64_v_v(i64 %x, i64 %y) {
 ; GFX10-NEXT:    v_mov_b32_e32 v5, v1
 ; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s4, v4, v2, 0
 ; GFX10-NEXT:    v_mad_u64_u32 v[6:7], s4, v4, v3, 0
-; GFX10-NEXT:    v_mad_u64_u32 v[9:10], s4, v5, v2, 0
-; GFX10-NEXT:    v_mad_i64_i32 v[11:12], s4, v5, v3, 0
-; GFX10-NEXT:    v_mov_b32_e32 v8, v1
-; GFX10-NEXT:    v_add3_u32 v1, v1, v6, v9
-; GFX10-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v6
+; GFX10-NEXT:    v_mad_u64_u32 v[8:9], s4, v5, v2, 0
+; GFX10-NEXT:    v_mad_i64_i32 v[10:11], s4, v5, v3, 0
+; GFX10-NEXT:    v_mov_b32_e32 v12, v1
+; GFX10-NEXT:    v_add3_u32 v1, v1, v6, v8
+; GFX10-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v6
 ; GFX10-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v9
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v10, vcc_lo
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v12, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v11
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v8
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v11, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v10
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo
 ; GFX10-NEXT:    v_sub_co_u32 v2, vcc_lo, v7, v2
-; GFX10-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo
+; GFX10-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v9, vcc_lo
 ; GFX10-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0, v5
 ; GFX10-NEXT:    v_cndmask_b32_e32 v6, v7, v2, vcc_lo
-; GFX10-NEXT:    v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX10-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc_lo
 ; GFX10-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; GFX10-NEXT:    v_sub_co_u32 v4, vcc_lo, v6, v4
 ; GFX10-NEXT:    v_subrev_co_ci_u32_e32 v7, vcc_lo, 0, v5, vcc_lo
@@ -215,28 +215,28 @@ define { i64, i1 } @smulo_i64_v_v(i64 %x, i64 %y) {
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v4, v2, 0
 ; GFX11-NEXT:    v_mad_u64_u32 v[6:7], null, v4, v3, 0
-; GFX11-NEXT:    v_mad_u64_u32 v[9:10], null, v5, v2, 0
-; GFX11-NEXT:    v_mad_i64_i32 v[11:12], null, v5, v3, 0
+; GFX11-NEXT:    v_mad_u64_u32 v[8:9], null, v5, v2, 0
+; GFX11-NEXT:    v_mad_i64_i32 v[10:11], null, v5, v3, 0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_mov_b32_e32 v8, v1
-; GFX11-NEXT:    v_add3_u32 v1, v1, v6, v9
+; GFX11-NEXT:    v_mov_b32_e32 v12, v1
+; GFX11-NEXT:    v_add3_u32 v1, v1, v6, v8
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v6
+; GFX11-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v6
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v8, vcc_lo, v8, v9
+; GFX11-NEXT:    v_add_co_u32 v12, vcc_lo, v12, v8
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v10, vcc_lo
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v12, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v11
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, v7, v9, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v11, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v10
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v8, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, 0, v9, vcc_lo
 ; GFX11-NEXT:    v_sub_co_u32 v2, vcc_lo, v7, v2
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v8, vcc_lo
+; GFX11-NEXT:    v_subrev_co_ci_u32_e32 v10, vcc_lo, 0, v9, vcc_lo
 ; GFX11-NEXT:    v_cmp_gt_i32_e32 vcc_lo, 0, v5
 ; GFX11-NEXT:    v_cndmask_b32_e32 v6, v7, v2, vcc_lo
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
-; GFX11-NEXT:    v_cndmask_b32_e32 v5, v8, v10, vcc_lo
+; GFX11-NEXT:    v_cndmask_b32_e32 v5, v9, v10, vcc_lo
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; GFX11-NEXT:    v_sub_co_u32 v4, vcc_lo, v6, v4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)

diff  --git a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
index 2e7fa86e8ab8b6..d100cadb8ee579 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i1.ll
@@ -3562,13 +3562,13 @@ define amdgpu_kernel void @constant_sextload_i1_to_i64(ptr addrspace(1) %out, pt
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s3
-; GFX8-NEXT:    flat_load_ubyte v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v1, s0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    flat_load_ubyte v2, v[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_bfe_i32 v3, v0, 0, 1
-; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GFX8-NEXT:    flat_store_dwordx2 v[1:2], v[3:4]
+; GFX8-NEXT:    v_bfe_i32 v2, v2, 0, 1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; EG-LABEL: constant_sextload_i1_to_i64:
@@ -3677,13 +3677,13 @@ define amdgpu_kernel void @constant_sextload_v1i1_to_v1i64(ptr addrspace(1) %out
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s3
-; GFX8-NEXT:    flat_load_ubyte v0, v[0:1]
-; GFX8-NEXT:    v_mov_b32_e32 v1, s0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NEXT:    flat_load_ubyte v2, v[0:1]
+; GFX8-NEXT:    v_mov_b32_e32 v0, s0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_bfe_i32 v3, v0, 0, 1
-; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GFX8-NEXT:    flat_store_dwordx2 v[1:2], v[3:4]
+; GFX8-NEXT:    v_bfe_i32 v2, v2, 0, 1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; EG-LABEL: constant_sextload_v1i1_to_v1i64:
@@ -4755,66 +4755,66 @@ define amdgpu_kernel void @constant_sextload_v16i1_to_v16i64(ptr addrspace(1) %o
 ; GFX6-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX6-NEXT:    s_mov_b32 s8, s6
 ; GFX6-NEXT:    s_mov_b32 s9, s7
-; GFX6-NEXT:    buffer_load_ushort v5, off, s[8:11], 0
+; GFX6-NEXT:    buffer_load_ushort v1, off, s[8:11], 0
 ; GFX6-NEXT:    s_mov_b32 s0, s4
 ; GFX6-NEXT:    s_mov_b32 s1, s5
 ; GFX6-NEXT:    s_waitcnt vmcnt(0)
-; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 14, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 15, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v7, 12, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v9, 10, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v11, 11, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v12, 8, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v13, 9, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v8, 6, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v10, 7, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v4, 4, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v6, 5, v5
-; GFX6-NEXT:    v_lshrrev_b32_e32 v14, 2, v5
-; GFX6-NEXT:    v_bfe_i32 v2, v1, 0, 1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v3, 14, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v5, 15, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v7, 12, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v9, 13, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v11, 10, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v12, 8, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v13, 9, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v8, 6, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v10, 7, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v4, 4, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v6, 5, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v0, 2, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v2, 3, v1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v16, 1, v1
+; GFX6-NEXT:    v_bfe_i32 v2, v2, 0, 1
 ; GFX6-NEXT:    v_bfe_i32 v0, v0, 0, 1
-; GFX6-NEXT:    v_lshrrev_b32_e32 v15, 3, v5
-; GFX6-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:112
-; GFX6-NEXT:    s_waitcnt expcnt(0)
-; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 1, v5
-; GFX6-NEXT:    v_bfe_i32 v2, v15, 0, 1
-; GFX6-NEXT:    v_bfe_i32 v0, v14, 0, 1
 ; GFX6-NEXT:    v_bfe_i32 v6, v6, 0, 1
 ; GFX6-NEXT:    v_bfe_i32 v4, v4, 0, 1
 ; GFX6-NEXT:    v_bfe_i32 v10, v10, 0, 1
 ; GFX6-NEXT:    v_bfe_i32 v8, v8, 0, 1
 ; GFX6-NEXT:    v_bfe_i32 v14, v13, 0, 1
 ; GFX6-NEXT:    v_bfe_i32 v12, v12, 0, 1
-; GFX6-NEXT:    v_bfe_i32 v18, v11, 0, 1
-; GFX6-NEXT:    v_bfe_i32 v16, v9, 0, 1
-; GFX6-NEXT:    v_bfe_i32 v22, v1, 0, 1
-; GFX6-NEXT:    v_bfe_i32 v20, v5, 0, 1
-; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 13, v5
-; GFX6-NEXT:    v_bfe_i32 v26, v1, 0, 1
-; GFX6-NEXT:    v_bfe_i32 v24, v7, 0, 1
-; GFX6-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GFX6-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GFX6-NEXT:    v_bfe_i32 v17, v5, 0, 1
+; GFX6-NEXT:    v_bfe_i32 v15, v3, 0, 1
+; GFX6-NEXT:    v_bfe_i32 v21, v16, 0, 1
+; GFX6-NEXT:    v_bfe_i32 v19, v1, 0, 1
+; GFX6-NEXT:    v_lshrrev_b32_e32 v1, 11, v1
+; GFX6-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GFX6-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
+; GFX6-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:112
+; GFX6-NEXT:    v_bfe_i32 v25, v1, 0, 1
+; GFX6-NEXT:    v_bfe_i32 v23, v11, 0, 1
+; GFX6-NEXT:    v_bfe_i32 v29, v9, 0, 1
+; GFX6-NEXT:    v_bfe_i32 v27, v7, 0, 1
+; GFX6-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
+; GFX6-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GFX6-NEXT:    s_waitcnt expcnt(0)
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
 ; GFX6-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GFX6-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GFX6-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GFX6-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
-; GFX6-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GFX6-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:96
-; GFX6-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:80
+; GFX6-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX6-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX6-NEXT:    v_ashrrev_i32_e32 v30, 31, v29
+; GFX6-NEXT:    v_ashrrev_i32_e32 v28, 31, v27
+; GFX6-NEXT:    buffer_store_dwordx4 v[27:30], off, s[0:3], 0 offset:96
+; GFX6-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:80
 ; GFX6-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:64
 ; GFX6-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0 offset:48
 ; GFX6-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:32
 ; GFX6-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
-; GFX6-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0
+; GFX6-NEXT:    buffer_store_dwordx4 v[19:22], off, s[0:3], 0
 ; GFX6-NEXT:    s_endpgm
 ;
 ; GFX8-LABEL: constant_sextload_v16i1_to_v16i64:
@@ -4838,78 +4838,78 @@ define amdgpu_kernel void @constant_sextload_v16i1_to_v16i64(ptr addrspace(1) %o
 ; GFX8-NEXT:    v_mov_b32_e32 v9, s2
 ; GFX8-NEXT:    s_add_u32 s2, s0, 64
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v12, s3
-; GFX8-NEXT:    v_mov_b32_e32 v11, s2
+; GFX8-NEXT:    v_mov_b32_e32 v16, s3
+; GFX8-NEXT:    v_mov_b32_e32 v15, s2
 ; GFX8-NEXT:    s_add_u32 s2, s0, 48
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v19, s3
-; GFX8-NEXT:    v_mov_b32_e32 v18, s2
+; GFX8-NEXT:    v_mov_b32_e32 v23, s3
+; GFX8-NEXT:    v_mov_b32_e32 v22, s2
 ; GFX8-NEXT:    s_add_u32 s2, s0, 32
-; GFX8-NEXT:    v_mov_b32_e32 v17, s1
+; GFX8-NEXT:    v_mov_b32_e32 v21, s1
 ; GFX8-NEXT:    s_addc_u32 s3, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v16, s0
+; GFX8-NEXT:    v_mov_b32_e32 v20, s0
 ; GFX8-NEXT:    s_add_u32 s0, s0, 16
 ; GFX8-NEXT:    s_addc_u32 s1, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v21, s3
-; GFX8-NEXT:    v_mov_b32_e32 v23, s1
-; GFX8-NEXT:    v_mov_b32_e32 v20, s2
-; GFX8-NEXT:    v_mov_b32_e32 v22, s0
+; GFX8-NEXT:    v_mov_b32_e32 v25, s3
+; GFX8-NEXT:    v_mov_b32_e32 v27, s1
+; GFX8-NEXT:    v_mov_b32_e32 v24, s2
+; GFX8-NEXT:    v_mov_b32_e32 v26, s0
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v1, 14, v0
 ; GFX8-NEXT:    v_lshrrev_b16_e32 v2, 15, v0
 ; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 1
 ; GFX8-NEXT:    v_bfe_i32 v1, v1, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v13, 12, v0
-; GFX8-NEXT:    v_lshrrev_b16_e32 v14, 13, v0
+; GFX8-NEXT:    v_lshrrev_b16_e32 v11, 12, v0
+; GFX8-NEXT:    v_lshrrev_b16_e32 v12, 13, v0
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
 ; GFX8-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
-; GFX8-NEXT:    v_lshrrev_b16_e32 v15, 10, v0
-; GFX8-NEXT:    v_bfe_i32 v3, v14, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v1, v13, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v5, 11, v0
+; GFX8-NEXT:    v_lshrrev_b16_e32 v13, 10, v0
+; GFX8-NEXT:    v_bfe_i32 v3, v12, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v1, v11, 0, 1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v14, 11, v0
+; GFX8-NEXT:    v_lshrrev_b16_e32 v6, 3, v0
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v5, 2, v0
 ; GFX8-NEXT:    flat_store_dwordx4 v[7:8], v[1:4]
-; GFX8-NEXT:    v_lshrrev_b16_e32 v6, 8, v0
-; GFX8-NEXT:    v_bfe_i32 v3, v5, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v1, v15, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v13, 9, v0
-; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GFX8-NEXT:    flat_store_dwordx4 v[9:10], v[1:4]
-; GFX8-NEXT:    v_lshrrev_b16_e32 v7, 6, v0
-; GFX8-NEXT:    v_bfe_i32 v3, v13, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v1, v6, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v14, 7, v0
-; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v5, 4, v0
-; GFX8-NEXT:    v_lshrrev_b16_e32 v8, 5, v0
-; GFX8-NEXT:    v_lshrrev_b16_e32 v9, 2, v0
-; GFX8-NEXT:    v_lshrrev_b16_e32 v6, 3, v0
-; GFX8-NEXT:    flat_store_dwordx4 v[11:12], v[1:4]
-; GFX8-NEXT:    v_bfe_i32 v14, v14, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e32 v2, 1, v0
-; GFX8-NEXT:    v_bfe_i32 v12, v7, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v2, v2, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v6, v6, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v4, v9, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v10, v8, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v8, v5, 0, 1
-; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GFX8-NEXT:    v_lshrrev_b16_e32 v17, 8, v0
+; GFX8-NEXT:    v_bfe_i32 v3, v6, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v6, v14, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v4, v13, 0, 1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v18, 9, v0
+; GFX8-NEXT:    v_bfe_i32 v1, v5, 0, 1
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
+; GFX8-NEXT:    flat_store_dwordx4 v[9:10], v[4:7]
+; GFX8-NEXT:    v_bfe_i32 v10, v18, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v8, v17, 0, 1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v19, 6, v0
+; GFX8-NEXT:    v_lshrrev_b16_e32 v28, 4, v0
+; GFX8-NEXT:    v_lshrrev_b16_e32 v29, 5, v0
+; GFX8-NEXT:    v_lshrrev_b16_e32 v2, 1, v0
+; GFX8-NEXT:    v_bfe_i32 v12, v0, 0, 1
+; GFX8-NEXT:    v_lshrrev_b16_e32 v0, 7, v0
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GFX8-NEXT:    flat_store_dwordx4 v[18:19], v[12:15]
-; GFX8-NEXT:    flat_store_dwordx4 v[20:21], v[8:11]
-; GFX8-NEXT:    flat_store_dwordx4 v[22:23], v[4:7]
-; GFX8-NEXT:    flat_store_dwordx4 v[16:17], v[0:3]
+; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[8:11]
+; GFX8-NEXT:    v_bfe_i32 v18, v0, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v16, v19, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v7, v29, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v5, v28, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v14, v2, 0, 1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GFX8-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
+; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
+; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GFX8-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
+; GFX8-NEXT:    flat_store_dwordx4 v[22:23], v[16:19]
+; GFX8-NEXT:    flat_store_dwordx4 v[24:25], v[5:8]
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[1:4]
+; GFX8-NEXT:    flat_store_dwordx4 v[20:21], v[12:15]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; EG-LABEL: constant_sextload_v16i1_to_v16i64:
@@ -5635,187 +5635,187 @@ define amdgpu_kernel void @constant_sextload_v32i1_to_v32i64(ptr addrspace(1) %o
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_load_dword s8, s[2:3], 0x0
+; GFX8-NEXT:    s_load_dword s4, s[2:3], 0x0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_lshr_b32 s10, s8, 22
-; GFX8-NEXT:    s_lshr_b32 s12, s8, 23
-; GFX8-NEXT:    s_lshr_b32 s14, s8, 20
-; GFX8-NEXT:    s_lshr_b32 s16, s8, 21
-; GFX8-NEXT:    s_lshr_b32 s18, s8, 18
-; GFX8-NEXT:    s_lshr_b32 s20, s8, 19
-; GFX8-NEXT:    s_lshr_b32 s22, s8, 16
-; GFX8-NEXT:    s_lshr_b32 s24, s8, 17
-; GFX8-NEXT:    s_lshr_b32 s6, s8, 24
-; GFX8-NEXT:    s_bfe_i64 s[2:3], s[6:7], 0x10000
-; GFX8-NEXT:    s_bfe_i64 s[4:5], s[8:9], 0x10000
-; GFX8-NEXT:    s_bfe_i64 s[24:25], s[24:25], 0x10000
-; GFX8-NEXT:    s_bfe_i64 s[22:23], s[22:23], 0x10000
+; GFX8-NEXT:    s_lshr_b32 s6, s4, 22
+; GFX8-NEXT:    s_lshr_b32 s8, s4, 23
+; GFX8-NEXT:    s_lshr_b32 s10, s4, 20
+; GFX8-NEXT:    s_lshr_b32 s12, s4, 21
+; GFX8-NEXT:    s_lshr_b32 s14, s4, 18
+; GFX8-NEXT:    s_lshr_b32 s16, s4, 19
+; GFX8-NEXT:    s_lshr_b32 s18, s4, 16
+; GFX8-NEXT:    s_lshr_b32 s20, s4, 17
+; GFX8-NEXT:    s_lshr_b32 s2, s4, 24
+; GFX8-NEXT:    v_lshrrev_b16_e64 v2, 14, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v3, 15, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v15, 12, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v17, 13, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v13, 10, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v14, 11, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 8, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v10, 9, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v6, 6, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 7, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v4, 4, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v5, 5, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v0, 2, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v1, 3, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v7, 1, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v11, 6, s2
+; GFX8-NEXT:    v_lshrrev_b16_e64 v12, 7, s2
+; GFX8-NEXT:    v_lshrrev_b16_e64 v16, 4, s2
+; GFX8-NEXT:    v_lshrrev_b16_e64 v18, 5, s2
+; GFX8-NEXT:    v_lshrrev_b16_e64 v19, 2, s2
+; GFX8-NEXT:    v_lshrrev_b16_e64 v20, 3, s2
+; GFX8-NEXT:    v_lshrrev_b16_e64 v27, 1, s2
+; GFX8-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x10000
+; GFX8-NEXT:    s_bfe_i64 s[4:5], s[4:5], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[14:15], s[14:15], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[10:11], s[10:11], 0x10000
-; GFX8-NEXT:    v_mov_b32_e32 v11, s10
-; GFX8-NEXT:    s_add_u32 s10, s0, 0xb0
-; GFX8-NEXT:    v_mov_b32_e32 v12, s11
-; GFX8-NEXT:    s_addc_u32 s11, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v16, s11
-; GFX8-NEXT:    v_mov_b32_e32 v15, s10
-; GFX8-NEXT:    s_add_u32 s10, s0, 0xa0
-; GFX8-NEXT:    v_mov_b32_e32 v13, s12
-; GFX8-NEXT:    v_mov_b32_e32 v14, s13
-; GFX8-NEXT:    s_addc_u32 s11, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
-; GFX8-NEXT:    v_mov_b32_e32 v16, s11
-; GFX8-NEXT:    v_mov_b32_e32 v15, s10
-; GFX8-NEXT:    s_add_u32 s10, s0, 0x90
-; GFX8-NEXT:    v_mov_b32_e32 v11, s14
-; GFX8-NEXT:    v_mov_b32_e32 v12, s15
-; GFX8-NEXT:    v_mov_b32_e32 v13, s16
-; GFX8-NEXT:    v_mov_b32_e32 v14, s17
-; GFX8-NEXT:    s_addc_u32 s11, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
-; GFX8-NEXT:    v_mov_b32_e32 v16, s11
-; GFX8-NEXT:    v_mov_b32_e32 v15, s10
-; GFX8-NEXT:    s_add_u32 s10, s0, 0x80
-; GFX8-NEXT:    v_mov_b32_e32 v11, s18
-; GFX8-NEXT:    v_mov_b32_e32 v12, s19
-; GFX8-NEXT:    v_mov_b32_e32 v13, s20
-; GFX8-NEXT:    v_mov_b32_e32 v14, s21
-; GFX8-NEXT:    s_addc_u32 s11, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
-; GFX8-NEXT:    v_mov_b32_e32 v16, s11
-; GFX8-NEXT:    v_mov_b32_e32 v15, s10
-; GFX8-NEXT:    s_add_u32 s10, s0, 0x70
-; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 14, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v10, 15, s8
-; GFX8-NEXT:    v_mov_b32_e32 v11, s22
-; GFX8-NEXT:    v_mov_b32_e32 v12, s23
-; GFX8-NEXT:    v_mov_b32_e32 v13, s24
-; GFX8-NEXT:    v_mov_b32_e32 v14, s25
-; GFX8-NEXT:    s_addc_u32 s11, s1, 0
-; GFX8-NEXT:    v_lshrrev_b16_e64 v7, 12, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 13, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v5, 10, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v6, 11, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v3, 8, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v4, 9, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v1, 6, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v2, 7, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v0, 4, s8
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
-; GFX8-NEXT:    v_lshrrev_b16_e64 v16, 5, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v20, 2, s8
-; GFX8-NEXT:    v_bfe_i32 v11, v10, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v9, v9, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v14, s11
-; GFX8-NEXT:    v_lshrrev_b16_e64 v21, 3, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v22, 1, s8
-; GFX8-NEXT:    s_add_u32 s8, s0, 0x60
-; GFX8-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GFX8-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GFX8-NEXT:    v_mov_b32_e32 v13, s10
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[13:14], v[9:12]
-; GFX8-NEXT:    v_bfe_i32 v7, v7, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v12, s9
-; GFX8-NEXT:    v_bfe_i32 v9, v8, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v11, s8
-; GFX8-NEXT:    s_add_u32 s8, s0, 0x50
-; GFX8-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GFX8-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[11:12], v[7:10]
-; GFX8-NEXT:    v_bfe_i32 v5, v5, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v10, s9
-; GFX8-NEXT:    v_bfe_i32 v7, v6, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v9, s8
-; GFX8-NEXT:    s_add_u32 s8, s0, 64
-; GFX8-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GFX8-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[9:10], v[5:8]
-; GFX8-NEXT:    v_mov_b32_e32 v11, s9
-; GFX8-NEXT:    v_bfe_i32 v5, v4, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v3, v3, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v10, s8
-; GFX8-NEXT:    s_add_u32 s8, s0, 48
-; GFX8-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
-; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[10:11], v[3:6]
-; GFX8-NEXT:    v_bfe_i32 v1, v1, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v11, s9
-; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GFX8-NEXT:    v_mov_b32_e32 v10, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v12, 6, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v13, 7, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 4, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 5, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v5, 2, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v6, 3, s6
-; GFX8-NEXT:    flat_store_dwordx4 v[10:11], v[1:4]
-; GFX8-NEXT:    v_bfe_i32 v18, v16, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e64 v1, 1, s6
+; GFX8-NEXT:    s_bfe_i64 s[8:9], s[8:9], 0x10000
+; GFX8-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
+; GFX8-NEXT:    v_mov_b32_e32 v21, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 0xb0
+; GFX8-NEXT:    v_mov_b32_e32 v22, s7
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    v_mov_b32_e32 v26, s7
+; GFX8-NEXT:    v_mov_b32_e32 v25, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 0xa0
+; GFX8-NEXT:    v_mov_b32_e32 v23, s8
+; GFX8-NEXT:    v_mov_b32_e32 v24, s9
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[25:26], v[21:24]
+; GFX8-NEXT:    v_mov_b32_e32 v26, s7
+; GFX8-NEXT:    v_mov_b32_e32 v25, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x90
+; GFX8-NEXT:    v_mov_b32_e32 v21, s10
+; GFX8-NEXT:    v_mov_b32_e32 v22, s11
+; GFX8-NEXT:    v_mov_b32_e32 v23, s12
+; GFX8-NEXT:    v_mov_b32_e32 v24, s13
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[25:26], v[21:24]
+; GFX8-NEXT:    v_mov_b32_e32 v26, s7
+; GFX8-NEXT:    v_mov_b32_e32 v25, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x80
+; GFX8-NEXT:    v_mov_b32_e32 v21, s14
+; GFX8-NEXT:    v_mov_b32_e32 v22, s15
+; GFX8-NEXT:    v_mov_b32_e32 v23, s16
+; GFX8-NEXT:    v_mov_b32_e32 v24, s17
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[25:26], v[21:24]
+; GFX8-NEXT:    v_mov_b32_e32 v26, s7
+; GFX8-NEXT:    v_mov_b32_e32 v21, s18
+; GFX8-NEXT:    v_mov_b32_e32 v22, s19
+; GFX8-NEXT:    v_mov_b32_e32 v23, s20
+; GFX8-NEXT:    v_mov_b32_e32 v24, s21
+; GFX8-NEXT:    v_mov_b32_e32 v25, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x70
+; GFX8-NEXT:    flat_store_dwordx4 v[25:26], v[21:24]
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    v_bfe_i32 v23, v3, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v21, v2, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v2, s6
+; GFX8-NEXT:    v_mov_b32_e32 v3, s7
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x60
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX8-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GFX8-NEXT:    v_mov_b32_e32 v26, s7
+; GFX8-NEXT:    flat_store_dwordx4 v[2:3], v[21:24]
+; GFX8-NEXT:    v_mov_b32_e32 v25, s6
+; GFX8-NEXT:    v_bfe_i32 v23, v17, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v21, v15, 0, 1
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x50
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX8-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[25:26], v[21:24]
+; GFX8-NEXT:    v_bfe_i32 v25, v14, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v23, v13, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v14, s7
+; GFX8-NEXT:    v_mov_b32_e32 v13, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 64
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[13:14], v[23:26]
+; GFX8-NEXT:    v_bfe_i32 v12, v12, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v25, v10, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v23, v9, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v10, s7
+; GFX8-NEXT:    v_mov_b32_e32 v9, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 48
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[9:10], v[23:26]
+; GFX8-NEXT:    v_bfe_i32 v10, v11, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v25, v8, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v23, v6, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v9, s7
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX8-NEXT:    v_mov_b32_e32 v8, s6
 ; GFX8-NEXT:    s_add_u32 s6, s0, 32
-; GFX8-NEXT:    v_bfe_i32 v2, v1, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v16, v0, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[23:26]
 ; GFX8-NEXT:    s_addc_u32 s7, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GFX8-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GFX8-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8-NEXT:    v_bfe_i32 v25, v5, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v23, v4, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX8-NEXT:    v_mov_b32_e32 v5, s7
 ; GFX8-NEXT:    s_add_u32 s6, s0, 16
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[16:19]
-; GFX8-NEXT:    v_bfe_i32 v20, v20, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v18, v22, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v22, v21, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[4:5], v[23:26]
 ; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    v_bfe_i32 v25, v1, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v23, v0, 0, 1
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
-; GFX8-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s7
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[20:23]
+; GFX8-NEXT:    v_bfe_i32 v6, v7, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[23:26]
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s0
-; GFX8-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GFX8-NEXT:    v_mov_b32_e32 v16, s4
-; GFX8-NEXT:    v_mov_b32_e32 v17, s5
+; GFX8-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
+; GFX8-NEXT:    v_mov_b32_e32 v4, s4
+; GFX8-NEXT:    v_mov_b32_e32 v5, s5
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s1
 ; GFX8-NEXT:    s_add_u32 s4, s0, 0xf0
-; GFX8-NEXT:    v_bfe_i32 v14, v13, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v12, v12, 0, 1
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[16:19]
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
+; GFX8-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    s_add_u32 s4, s0, 0xe0
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[10:13]
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    v_bfe_i32 v10, v9, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v8, v8, 0, 1
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    s_add_u32 s4, s0, 0xd0
-; GFX8-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GFX8-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GFX8-NEXT:    v_bfe_i32 v17, v18, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v15, v16, 0, 1
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v6, v6, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v4, v5, 0, 1
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
-; GFX8-NEXT:    v_mov_b32_e32 v0, s4
+; GFX8-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GFX8-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
 ; GFX8-NEXT:    s_add_u32 s0, s0, 0xc0
-; GFX8-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GFX8-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GFX8-NEXT:    v_mov_b32_e32 v1, s5
+; GFX8-NEXT:    v_bfe_i32 v21, v20, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v19, v19, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[15:18]
+; GFX8-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX8-NEXT:    s_addc_u32 s1, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GFX8-NEXT:    v_bfe_i32 v2, v27, 0, 1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GFX8-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
+; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    v_mov_b32_e32 v5, s1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[19:22]
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s3
 ; GFX8-NEXT:    v_mov_b32_e32 v4, s0
@@ -7204,34 +7204,36 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
 ; GFX8-LABEL: constant_sextload_v64i1_to_v64i64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
-; GFX8-NEXT:    s_mov_b32 s13, 0
-; GFX8-NEXT:    s_mov_b32 s11, s13
+; GFX8-NEXT:    s_mov_b32 s7, 0
+; GFX8-NEXT:    s_mov_b32 s13, s7
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_load_dwordx2 s[8:9], s[2:3], 0x0
+; GFX8-NEXT:    s_load_dwordx2 s[10:11], s[2:3], 0x0
+; GFX8-NEXT:    v_mov_b32_e32 v29, s1
+; GFX8-NEXT:    v_mov_b32_e32 v28, s0
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NEXT:    s_lshr_b32 s16, s9, 22
-; GFX8-NEXT:    s_lshr_b32 s18, s9, 23
-; GFX8-NEXT:    s_lshr_b32 s20, s9, 20
-; GFX8-NEXT:    s_lshr_b32 s22, s9, 21
-; GFX8-NEXT:    s_lshr_b32 s24, s9, 18
-; GFX8-NEXT:    s_lshr_b32 s26, s9, 19
-; GFX8-NEXT:    s_lshr_b32 s28, s9, 16
-; GFX8-NEXT:    s_lshr_b32 s30, s9, 17
-; GFX8-NEXT:    s_lshr_b32 s34, s8, 22
-; GFX8-NEXT:    s_lshr_b32 s36, s8, 23
-; GFX8-NEXT:    s_lshr_b32 s38, s8, 20
-; GFX8-NEXT:    s_lshr_b32 s40, s8, 21
-; GFX8-NEXT:    s_lshr_b32 s42, s8, 18
-; GFX8-NEXT:    s_lshr_b32 s44, s8, 19
-; GFX8-NEXT:    s_lshr_b32 s46, s8, 16
-; GFX8-NEXT:    s_lshr_b32 s48, s8, 17
-; GFX8-NEXT:    s_mov_b32 s12, s9
-; GFX8-NEXT:    s_lshr_b32 s10, s9, 24
-; GFX8-NEXT:    s_lshr_b32 s6, s8, 24
-; GFX8-NEXT:    s_bfe_i64 s[2:3], s[6:7], 0x10000
-; GFX8-NEXT:    s_bfe_i64 s[4:5], s[10:11], 0x10000
-; GFX8-NEXT:    s_bfe_i64 s[12:13], s[12:13], 0x10000
-; GFX8-NEXT:    s_bfe_i64 s[14:15], s[8:9], 0x10000
+; GFX8-NEXT:    s_lshr_b32 s16, s11, 22
+; GFX8-NEXT:    s_lshr_b32 s18, s11, 23
+; GFX8-NEXT:    s_lshr_b32 s20, s11, 20
+; GFX8-NEXT:    s_lshr_b32 s22, s11, 21
+; GFX8-NEXT:    s_lshr_b32 s24, s11, 18
+; GFX8-NEXT:    s_lshr_b32 s26, s11, 19
+; GFX8-NEXT:    s_lshr_b32 s28, s11, 16
+; GFX8-NEXT:    s_lshr_b32 s30, s11, 17
+; GFX8-NEXT:    s_lshr_b32 s34, s10, 22
+; GFX8-NEXT:    s_lshr_b32 s36, s10, 23
+; GFX8-NEXT:    s_lshr_b32 s38, s10, 20
+; GFX8-NEXT:    s_lshr_b32 s40, s10, 21
+; GFX8-NEXT:    s_lshr_b32 s42, s10, 18
+; GFX8-NEXT:    s_lshr_b32 s44, s10, 19
+; GFX8-NEXT:    s_lshr_b32 s46, s10, 16
+; GFX8-NEXT:    s_lshr_b32 s48, s10, 17
+; GFX8-NEXT:    s_mov_b32 s6, s11
+; GFX8-NEXT:    s_lshr_b32 s12, s11, 24
+; GFX8-NEXT:    s_lshr_b32 s8, s10, 24
+; GFX8-NEXT:    s_bfe_i64 s[2:3], s[8:9], 0x10000
+; GFX8-NEXT:    s_bfe_i64 s[4:5], s[12:13], 0x10000
+; GFX8-NEXT:    s_bfe_i64 s[6:7], s[6:7], 0x10000
+; GFX8-NEXT:    s_bfe_i64 s[14:15], s[10:11], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[48:49], s[48:49], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[46:47], s[46:47], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[44:45], s[44:45], 0x10000
@@ -7248,323 +7250,321 @@ define amdgpu_kernel void @constant_sextload_v64i1_to_v64i64(ptr addrspace(1) %o
 ; GFX8-NEXT:    s_bfe_i64 s[20:21], s[20:21], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[18:19], s[18:19], 0x10000
 ; GFX8-NEXT:    s_bfe_i64 s[16:17], s[16:17], 0x10000
-; GFX8-NEXT:    v_mov_b32_e32 v11, s16
+; GFX8-NEXT:    v_mov_b32_e32 v22, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x1b0
-; GFX8-NEXT:    v_mov_b32_e32 v12, s17
+; GFX8-NEXT:    v_mov_b32_e32 v23, s17
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_mov_b32_e32 v13, s18
-; GFX8-NEXT:    v_mov_b32_e32 v14, s19
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x1a0
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
+; GFX8-NEXT:    v_mov_b32_e32 v24, s18
+; GFX8-NEXT:    v_mov_b32_e32 v25, s19
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_mov_b32_e32 v11, s20
-; GFX8-NEXT:    v_mov_b32_e32 v12, s21
-; GFX8-NEXT:    v_mov_b32_e32 v13, s22
-; GFX8-NEXT:    v_mov_b32_e32 v14, s23
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x190
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
+; GFX8-NEXT:    v_mov_b32_e32 v22, s20
+; GFX8-NEXT:    v_mov_b32_e32 v23, s21
+; GFX8-NEXT:    v_mov_b32_e32 v24, s22
+; GFX8-NEXT:    v_mov_b32_e32 v25, s23
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_mov_b32_e32 v11, s24
-; GFX8-NEXT:    v_mov_b32_e32 v12, s25
-; GFX8-NEXT:    v_mov_b32_e32 v13, s26
-; GFX8-NEXT:    v_mov_b32_e32 v14, s27
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x180
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
+; GFX8-NEXT:    v_mov_b32_e32 v22, s24
+; GFX8-NEXT:    v_mov_b32_e32 v23, s25
+; GFX8-NEXT:    v_mov_b32_e32 v24, s26
+; GFX8-NEXT:    v_mov_b32_e32 v25, s27
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_mov_b32_e32 v11, s28
-; GFX8-NEXT:    v_mov_b32_e32 v12, s29
-; GFX8-NEXT:    v_mov_b32_e32 v13, s30
-; GFX8-NEXT:    v_mov_b32_e32 v14, s31
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0xb0
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
+; GFX8-NEXT:    v_mov_b32_e32 v22, s28
+; GFX8-NEXT:    v_mov_b32_e32 v23, s29
+; GFX8-NEXT:    v_mov_b32_e32 v24, s30
+; GFX8-NEXT:    v_mov_b32_e32 v25, s31
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_mov_b32_e32 v11, s34
-; GFX8-NEXT:    v_mov_b32_e32 v12, s35
-; GFX8-NEXT:    v_mov_b32_e32 v13, s36
-; GFX8-NEXT:    v_mov_b32_e32 v14, s37
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0xa0
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
+; GFX8-NEXT:    v_mov_b32_e32 v22, s34
+; GFX8-NEXT:    v_mov_b32_e32 v23, s35
+; GFX8-NEXT:    v_mov_b32_e32 v24, s36
+; GFX8-NEXT:    v_mov_b32_e32 v25, s37
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_mov_b32_e32 v11, s38
-; GFX8-NEXT:    v_mov_b32_e32 v12, s39
-; GFX8-NEXT:    v_mov_b32_e32 v13, s40
-; GFX8-NEXT:    v_mov_b32_e32 v14, s41
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x90
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
+; GFX8-NEXT:    v_mov_b32_e32 v22, s38
+; GFX8-NEXT:    v_mov_b32_e32 v23, s39
+; GFX8-NEXT:    v_mov_b32_e32 v24, s40
+; GFX8-NEXT:    v_mov_b32_e32 v25, s41
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_mov_b32_e32 v11, s42
-; GFX8-NEXT:    v_mov_b32_e32 v12, s43
-; GFX8-NEXT:    v_mov_b32_e32 v13, s44
-; GFX8-NEXT:    v_mov_b32_e32 v14, s45
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x80
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
+; GFX8-NEXT:    v_mov_b32_e32 v22, s42
+; GFX8-NEXT:    v_mov_b32_e32 v23, s43
+; GFX8-NEXT:    v_mov_b32_e32 v24, s44
+; GFX8-NEXT:    v_mov_b32_e32 v25, s45
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 14, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v10, 15, s8
-; GFX8-NEXT:    v_mov_b32_e32 v11, s46
-; GFX8-NEXT:    v_mov_b32_e32 v12, s47
-; GFX8-NEXT:    v_mov_b32_e32 v13, s48
-; GFX8-NEXT:    v_mov_b32_e32 v14, s49
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_mov_b32_e32 v27, s17
+; GFX8-NEXT:    v_mov_b32_e32 v26, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x70
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[11:14]
-; GFX8-NEXT:    v_bfe_i32 v9, v9, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v11, v10, 0, 1
+; GFX8-NEXT:    v_lshrrev_b16_e64 v20, 14, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v21, 15, s10
+; GFX8-NEXT:    v_mov_b32_e32 v22, s46
+; GFX8-NEXT:    v_mov_b32_e32 v23, s47
+; GFX8-NEXT:    v_mov_b32_e32 v24, s48
+; GFX8-NEXT:    v_mov_b32_e32 v25, s49
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_lshrrev_b16_e64 v7, 12, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 13, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GFX8-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[26:27], v[22:25]
+; GFX8-NEXT:    v_bfe_i32 v26, v21, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v20, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v21, s17
+; GFX8-NEXT:    v_mov_b32_e32 v20, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x60
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[9:12]
-; GFX8-NEXT:    v_bfe_i32 v7, v7, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v9, v8, 0, 1
+; GFX8-NEXT:    v_lshrrev_b16_e64 v18, 12, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v19, 13, s10
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_lshrrev_b16_e64 v5, 10, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v6, 11, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GFX8-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
+; GFX8-NEXT:    flat_store_dwordx4 v[20:21], v[24:27]
+; GFX8-NEXT:    v_lshrrev_b16_e64 v16, 10, s10
+; GFX8-NEXT:    v_bfe_i32 v26, v19, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v18, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v19, s17
+; GFX8-NEXT:    v_lshrrev_b16_e64 v17, 11, s10
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GFX8-NEXT:    v_mov_b32_e32 v18, s16
 ; GFX8-NEXT:    s_add_u32 s16, s0, 0x50
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[7:10]
-; GFX8-NEXT:    v_bfe_i32 v5, v5, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v7, v6, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[18:19], v[24:27]
 ; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_lshrrev_b16_e64 v3, 8, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v4, 9, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GFX8-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
-; GFX8-NEXT:    s_add_u32 s16, s0, 64
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[5:8]
+; GFX8-NEXT:    v_bfe_i32 v26, v17, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v16, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v16, s16
+; GFX8-NEXT:    v_lshrrev_b16_e64 v14, 8, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v15, 9, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v12, 6, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v13, 7, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v10, 4, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v11, 5, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 2, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 3, s10
+; GFX8-NEXT:    v_lshrrev_b16_e64 v7, 1, s10
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GFX8-NEXT:    v_mov_b32_e32 v17, s17
+; GFX8-NEXT:    s_add_u32 s10, s0, 64
+; GFX8-NEXT:    v_lshrrev_b16_e64 v5, 14, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v6, 15, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v3, 12, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v4, 13, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v1, 10, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v2, 11, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v0, 8, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v23, 9, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v22, 6, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v21, 7, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v20, 4, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v19, 5, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v18, 2, s11
+; GFX8-NEXT:    flat_store_dwordx4 v[16:17], v[24:27]
+; GFX8-NEXT:    v_lshrrev_b16_e64 v17, 3, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v16, 1, s11
+; GFX8-NEXT:    s_addc_u32 s11, s1, 0
+; GFX8-NEXT:    v_bfe_i32 v26, v15, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v14, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v15, s11
+; GFX8-NEXT:    v_mov_b32_e32 v14, s10
+; GFX8-NEXT:    s_add_u32 s10, s0, 48
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GFX8-NEXT:    s_addc_u32 s11, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[14:15], v[24:27]
 ; GFX8-NEXT:    v_bfe_i32 v3, v3, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v26, v13, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v12, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v13, s11
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GFX8-NEXT:    v_mov_b32_e32 v12, s10
+; GFX8-NEXT:    s_add_u32 s10, s0, 32
+; GFX8-NEXT:    flat_store_dwordx4 v[12:13], v[24:27]
+; GFX8-NEXT:    s_addc_u32 s11, s1, 0
+; GFX8-NEXT:    v_bfe_i32 v26, v11, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v10, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v10, s10
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GFX8-NEXT:    v_mov_b32_e32 v11, s11
+; GFX8-NEXT:    s_add_u32 s10, s0, 16
+; GFX8-NEXT:    flat_store_dwordx4 v[10:11], v[24:27]
+; GFX8-NEXT:    s_addc_u32 s11, s1, 0
+; GFX8-NEXT:    v_bfe_i32 v26, v9, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v8, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v8, s10
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GFX8-NEXT:    v_mov_b32_e32 v9, s11
+; GFX8-NEXT:    flat_store_dwordx4 v[8:9], v[24:27]
+; GFX8-NEXT:    s_add_u32 s10, s0, 0x170
+; GFX8-NEXT:    v_bfe_i32 v26, v7, 0, 1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_mov_b32_e32 v24, s14
+; GFX8-NEXT:    v_mov_b32_e32 v25, s15
+; GFX8-NEXT:    flat_store_dwordx4 v[28:29], v[24:27]
+; GFX8-NEXT:    s_addc_u32 s11, s1, 0
+; GFX8-NEXT:    v_bfe_i32 v26, v6, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v24, v5, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v5, s10
+; GFX8-NEXT:    v_mov_b32_e32 v6, s11
+; GFX8-NEXT:    s_add_u32 s10, s0, 0x160
+; GFX8-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GFX8-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GFX8-NEXT:    s_addc_u32 s11, s1, 0
+; GFX8-NEXT:    flat_store_dwordx4 v[5:6], v[24:27]
 ; GFX8-NEXT:    v_bfe_i32 v5, v4, 0, 1
-; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
-; GFX8-NEXT:    v_lshrrev_b16_e64 v1, 6, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v2, 7, s8
+; GFX8-NEXT:    v_mov_b32_e32 v25, s11
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
-; GFX8-NEXT:    s_add_u32 s16, s0, 48
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[3:6]
+; GFX8-NEXT:    v_mov_b32_e32 v24, s10
+; GFX8-NEXT:    s_add_u32 s10, s0, 0x150
+; GFX8-NEXT:    flat_store_dwordx4 v[24:25], v[3:6]
 ; GFX8-NEXT:    v_bfe_i32 v1, v1, 0, 1
 ; GFX8-NEXT:    v_bfe_i32 v3, v2, 0, 1
-; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v15, s16
+; GFX8-NEXT:    s_addc_u32 s11, s1, 0
+; GFX8-NEXT:    v_mov_b32_e32 v5, s10
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GFX8-NEXT:    v_mov_b32_e32 v16, s17
-; GFX8-NEXT:    s_add_u32 s16, s0, 32
-; GFX8-NEXT:    v_lshrrev_b16_e64 v0, 4, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v13, 5, s8
-; GFX8-NEXT:    flat_store_dwordx4 v[15:16], v[1:4]
-; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v16, s16
-; GFX8-NEXT:    v_bfe_i32 v2, v13, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v17, s17
-; GFX8-NEXT:    s_add_u32 s16, s0, 16
-; GFX8-NEXT:    v_lshrrev_b16_e64 v14, 2, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v11, 3, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    s_addc_u32 s17, s1, 0
-; GFX8-NEXT:    flat_store_dwordx4 v[16:17], v[0:3]
-; GFX8-NEXT:    v_mov_b32_e32 v18, s17
-; GFX8-NEXT:    v_bfe_i32 v2, v11, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v14, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e64 v12, 1, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v17, s16
-; GFX8-NEXT:    flat_store_dwordx4 v[17:18], v[0:3]
-; GFX8-NEXT:    v_mov_b32_e32 v18, s1
-; GFX8-NEXT:    v_bfe_i32 v2, v12, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 14, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v10, 15, s9
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_mov_b32_e32 v0, s14
-; GFX8-NEXT:    v_mov_b32_e32 v1, s15
-; GFX8-NEXT:    v_mov_b32_e32 v17, s0
-; GFX8-NEXT:    s_add_u32 s14, s0, 0x170
-; GFX8-NEXT:    flat_store_dwordx4 v[17:18], v[0:3]
-; GFX8-NEXT:    s_addc_u32 s15, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v10, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v9, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v9, s14
-; GFX8-NEXT:    v_lshrrev_b16_e64 v7, 12, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 13, s9
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v10, s15
-; GFX8-NEXT:    s_add_u32 s8, s0, 0x160
-; GFX8-NEXT:    v_lshrrev_b16_e64 v5, 10, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v6, 11, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v4, 8, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v15, 9, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v13, 6, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v16, 7, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v11, 4, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v12, 5, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v14, 2, s9
-; GFX8-NEXT:    flat_store_dwordx4 v[9:10], v[0:3]
-; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 3, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v10, 1, s9
-; GFX8-NEXT:    v_bfe_i32 v2, v8, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v7, 0, 1
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_mov_b32_e32 v7, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v8, s9
-; GFX8-NEXT:    s_add_u32 s8, s0, 0x150
-; GFX8-NEXT:    flat_store_dwordx4 v[7:8], v[0:3]
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v6, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v5, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v5, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v6, s9
+; GFX8-NEXT:    v_mov_b32_e32 v6, s11
+; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 6, s8
+; GFX8-NEXT:    v_lshrrev_b16_e64 v28, 7, s8
+; GFX8-NEXT:    v_lshrrev_b16_e64 v26, 4, s8
+; GFX8-NEXT:    v_lshrrev_b16_e64 v27, 5, s8
+; GFX8-NEXT:    v_lshrrev_b16_e64 v24, 2, s8
+; GFX8-NEXT:    v_lshrrev_b16_e64 v25, 3, s8
+; GFX8-NEXT:    flat_store_dwordx4 v[5:6], v[1:4]
+; GFX8-NEXT:    v_lshrrev_b16_e64 v6, 1, s8
 ; GFX8-NEXT:    s_add_u32 s8, s0, 0x140
-; GFX8-NEXT:    flat_store_dwordx4 v[5:6], v[0:3]
+; GFX8-NEXT:    v_bfe_i32 v2, v23, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v0, v0, 0, 1
 ; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v15, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v4, 0, 1
 ; GFX8-NEXT:    v_mov_b32_e32 v4, s8
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GFX8-NEXT:    v_mov_b32_e32 v5, s9
 ; GFX8-NEXT:    s_add_u32 s8, s0, 0x130
 ; GFX8-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NEXT:    v_bfe_i32 v4, v22, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v2, v6, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v6, v21, 0, 1
 ; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v16, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v13, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v4, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v5, s9
+; GFX8-NEXT:    v_mov_b32_e32 v0, s8
+; GFX8-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
+; GFX8-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
+; GFX8-NEXT:    v_mov_b32_e32 v1, s9
 ; GFX8-NEXT:    s_add_u32 s8, s0, 0x120
-; GFX8-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[4:7]
+; GFX8-NEXT:    v_bfe_i32 v21, v19, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v19, v20, 0, 1
 ; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v12, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v11, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v4, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v5, s9
+; GFX8-NEXT:    v_mov_b32_e32 v0, s8
+; GFX8-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GFX8-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
+; GFX8-NEXT:    v_mov_b32_e32 v1, s9
 ; GFX8-NEXT:    s_add_u32 s8, s0, 0x110
-; GFX8-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v9, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v14, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v4, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v5, s9
-; GFX8-NEXT:    s_add_u32 s8, s0, 0x100
-; GFX8-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
-; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v10, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v4, s8
-; GFX8-NEXT:    v_lshrrev_b16_e64 v7, 6, s10
-; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 7, s10
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_mov_b32_e32 v0, s12
-; GFX8-NEXT:    v_mov_b32_e32 v1, s13
-; GFX8-NEXT:    v_mov_b32_e32 v5, s9
-; GFX8-NEXT:    s_add_u32 s8, s0, 0x1f0
-; GFX8-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
+; GFX8-NEXT:    v_bfe_i32 v6, v25, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[19:22]
+; GFX8-NEXT:    v_bfe_i32 v25, v17, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v23, v18, 0, 1
 ; GFX8-NEXT:    s_addc_u32 s9, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v2, v8, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v0, v7, 0, 1
-; GFX8-NEXT:    v_mov_b32_e32 v6, s8
-; GFX8-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GFX8-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GFX8-NEXT:    v_mov_b32_e32 v7, s9
-; GFX8-NEXT:    v_lshrrev_b16_e64 v17, 4, s10
-; GFX8-NEXT:    v_lshrrev_b16_e64 v18, 5, s10
-; GFX8-NEXT:    v_lshrrev_b16_e64 v13, 6, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v12, 7, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v15, 4, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v9, 5, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v4, 2, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v5, 3, s6
-; GFX8-NEXT:    flat_store_dwordx4 v[6:7], v[0:3]
-; GFX8-NEXT:    v_bfe_i32 v18, v18, 0, 1
-; GFX8-NEXT:    v_lshrrev_b16_e64 v0, 1, s6
-; GFX8-NEXT:    s_add_u32 s6, s0, 0x1e0
-; GFX8-NEXT:    v_bfe_i32 v2, v0, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v16, v17, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v0, s8
+; GFX8-NEXT:    v_bfe_i32 v4, v24, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v19, v26, 0, 1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GFX8-NEXT:    v_mov_b32_e32 v1, s9
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[23:26]
+; GFX8-NEXT:    v_lshrrev_b16_e64 v14, 6, s12
+; GFX8-NEXT:    v_mov_b32_e32 v23, s6
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x100
+; GFX8-NEXT:    v_bfe_i32 v25, v16, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v24, s7
 ; GFX8-NEXT:    s_addc_u32 s7, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NEXT:    v_lshrrev_b16_e64 v20, 2, s10
-; GFX8-NEXT:    v_lshrrev_b16_e64 v21, 3, s10
-; GFX8-NEXT:    v_lshrrev_b16_e64 v22, 1, s10
-; GFX8-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GFX8-NEXT:    v_lshrrev_b16_e64 v15, 7, s12
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x1f0
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[23:26]
+; GFX8-NEXT:    v_bfe_i32 v16, v15, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v14, v14, 0, 1
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8-NEXT:    v_lshrrev_b16_e64 v12, 4, s12
+; GFX8-NEXT:    v_lshrrev_b16_e64 v13, 5, s12
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
+; GFX8-NEXT:    v_mov_b32_e32 v1, s7
+; GFX8-NEXT:    s_add_u32 s6, s0, 0x1e0
+; GFX8-NEXT:    v_bfe_i32 v21, v27, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[14:17]
+; GFX8-NEXT:    v_bfe_i32 v29, v13, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v27, v12, 0, 1
+; GFX8-NEXT:    s_addc_u32 s7, s1, 0
+; GFX8-NEXT:    v_mov_b32_e32 v0, s6
+; GFX8-NEXT:    v_lshrrev_b16_e64 v10, 2, s12
+; GFX8-NEXT:    v_lshrrev_b16_e64 v11, 3, s12
+; GFX8-NEXT:    v_bfe_i32 v25, v28, 0, 1
+; GFX8-NEXT:    v_ashrrev_i32_e32 v30, 31, v29
+; GFX8-NEXT:    v_ashrrev_i32_e32 v28, 31, v27
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s7
 ; GFX8-NEXT:    s_add_u32 s6, s0, 0x1d0
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[16:19]
-; GFX8-NEXT:    v_bfe_i32 v20, v20, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v18, v22, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v22, v21, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v23, v9, 0, 1
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[27:30]
+; GFX8-NEXT:    v_bfe_i32 v11, v11, 0, 1
+; GFX8-NEXT:    v_bfe_i32 v9, v10, 0, 1
 ; GFX8-NEXT:    s_addc_u32 s7, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s6
-; GFX8-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
-; GFX8-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GFX8-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
+; GFX8-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s7
-; GFX8-NEXT:    v_mov_b32_e32 v16, s4
+; GFX8-NEXT:    v_lshrrev_b16_e64 v8, 1, s12
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[9:12]
+; GFX8-NEXT:    v_bfe_i32 v14, v8, 0, 1
+; GFX8-NEXT:    v_mov_b32_e32 v12, s4
 ; GFX8-NEXT:    s_add_u32 s4, s0, 0x1c0
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[20:23]
-; GFX8-NEXT:    v_mov_b32_e32 v17, s5
+; GFX8-NEXT:    v_mov_b32_e32 v13, s5
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    s_add_u32 s4, s0, 0xf0
-; GFX8-NEXT:    v_bfe_i32 v14, v12, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v12, v13, 0, 1
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[16:19]
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    v_bfe_i32 v8, v15, 0, 1
-; GFX8-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GFX8-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
+; GFX8-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GFX8-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    s_add_u32 s4, s0, 0xe0
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[23:26]
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s4
-; GFX8-NEXT:    v_bfe_i32 v10, v9, 0, 1
 ; GFX8-NEXT:    v_mov_b32_e32 v1, s5
 ; GFX8-NEXT:    s_add_u32 s4, s0, 0xd0
-; GFX8-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GFX8-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GFX8-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GFX8-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
 ; GFX8-NEXT:    s_addc_u32 s5, s1, 0
-; GFX8-NEXT:    v_bfe_i32 v6, v5, 0, 1
-; GFX8-NEXT:    v_bfe_i32 v4, v4, 0, 1
-; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[8:11]
+; GFX8-NEXT:    flat_store_dwordx4 v[0:1], v[19:22]
 ; GFX8-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX8-NEXT:    s_add_u32 s0, s0, 0xc0
 ; GFX8-NEXT:    v_ashrrev_i32_e32 v7, 31, v6

diff  --git a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
index 87913841012184..ffc2cd23ec251f 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i32.ll
@@ -1603,13 +1603,13 @@ define amdgpu_kernel void @constant_sextload_v2i32_to_v2i64(ptr addrspace(1) %ou
 ; GFX6-NOHSA-NEXT:    s_load_dwordx2 s[4:5], s[2:3], 0x0
 ; GFX6-NOHSA-NEXT:    s_mov_b32 s3, 0xf000
 ; GFX6-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s7, s5, 31
-; GFX6-NOHSA-NEXT:    s_ashr_i32 s8, s4, 31
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s6, s5, 31
+; GFX6-NOHSA-NEXT:    s_ashr_i32 s7, s4, 31
 ; GFX6-NOHSA-NEXT:    s_mov_b32 s2, -1
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v0, s4
 ; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v2, s5
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s8
-; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s7
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v1, s7
+; GFX6-NOHSA-NEXT:    v_mov_b32_e32 v3, s6
 ; GFX6-NOHSA-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
 ; GFX6-NOHSA-NEXT:    s_endpgm
 ;
@@ -1621,13 +1621,13 @@ define amdgpu_kernel void @constant_sextload_v2i32_to_v2i64(ptr addrspace(1) %ou
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX7-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX7-HSA-NEXT:    s_ashr_i32 s1, s3, 31
-; GFX7-HSA-NEXT:    s_mov_b32 s0, s3
+; GFX7-HSA-NEXT:    s_ashr_i32 s0, s3, 31
+; GFX7-HSA-NEXT:    s_mov_b32 s1, s3
 ; GFX7-HSA-NEXT:    s_ashr_i32 s3, s2, 31
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX7-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s0
-; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s1
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v2, s1
+; GFX7-HSA-NEXT:    v_mov_b32_e32 v3, s0
 ; GFX7-HSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX7-HSA-NEXT:    s_endpgm
 ;
@@ -1639,13 +1639,13 @@ define amdgpu_kernel void @constant_sextload_v2i32_to_v2i64(ptr addrspace(1) %ou
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s1
 ; GFX8-NOHSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX8-NOHSA-NEXT:    s_ashr_i32 s1, s3, 31
-; GFX8-NOHSA-NEXT:    s_mov_b32 s0, s3
+; GFX8-NOHSA-NEXT:    s_ashr_i32 s0, s3, 31
+; GFX8-NOHSA-NEXT:    s_mov_b32 s1, s3
 ; GFX8-NOHSA-NEXT:    s_ashr_i32 s3, s2, 31
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v1, s3
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s1
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v2, s1
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v3, s0
 ; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
 ; GFX8-NOHSA-NEXT:    s_endpgm
 ;
@@ -1678,13 +1678,12 @@ define amdgpu_kernel void @constant_sextload_v2i32_to_v2i64(ptr addrspace(1) %ou
 ; GFX9-HSA-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX9-HSA-NEXT:    s_load_dwordx2 s[2:3], s[2:3], 0x0
 ; GFX9-HSA-NEXT:    s_waitcnt lgkmcnt(0)
-; GFX9-HSA-NEXT:    s_ashr_i32 s5, s3, 31
-; GFX9-HSA-NEXT:    s_mov_b32 s4, s3
-; GFX9-HSA-NEXT:    s_ashr_i32 s3, s2, 31
+; GFX9-HSA-NEXT:    s_ashr_i32 s4, s3, 31
+; GFX9-HSA-NEXT:    s_ashr_i32 s5, s2, 31
 ; GFX9-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GFX9-HSA-NEXT:    v_mov_b32_e32 v1, s3
-; GFX9-HSA-NEXT:    v_mov_b32_e32 v2, s4
-; GFX9-HSA-NEXT:    v_mov_b32_e32 v3, s5
+; GFX9-HSA-NEXT:    v_mov_b32_e32 v1, s5
+; GFX9-HSA-NEXT:    v_mov_b32_e32 v2, s3
+; GFX9-HSA-NEXT:    v_mov_b32_e32 v3, s4
 ; GFX9-HSA-NEXT:    global_store_dwordx4 v4, v[0:3], s[0:1]
 ; GFX9-HSA-NEXT:    s_endpgm
   %ld = load <2 x i32>, ptr addrspace(4) %in

diff  --git a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
index 66fc322e5e04b5..9ebd2018798253 100644
--- a/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-constant-i8.ll
@@ -7203,10 +7203,10 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    s_mov_b32 s50, s5
 ; GFX8-NOHSA-NEXT:    s_mov_b32 s52, s3
 ; GFX8-NOHSA-NEXT:    s_mov_b32 s54, s1
-; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v5, 8, s7
+; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v4, 8, s7
 ; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v1, 8, s6
 ; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v0, 8, s5
-; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v4, 8, s4
+; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v5, 8, s4
 ; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v8, 8, s3
 ; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v9, 8, s2
 ; GFX8-NOHSA-NEXT:    v_lshrrev_b16_e64 v6, 8, s1
@@ -7235,93 +7235,93 @@ define amdgpu_kernel void @constant_sextload_v32i8_to_v32i64(ptr addrspace(1) %o
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[34:35], s[34:35], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[30:31], s[30:31], 0x80000
 ; GFX8-NOHSA-NEXT:    s_bfe_i64 s[28:29], s[28:29], 0x80000
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s28
-; GFX8-NOHSA-NEXT:    s_add_u32 s28, s8, 0xf0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s29
-; GFX8-NOHSA-NEXT:    s_addc_u32 s29, s9, 0
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s28
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s58
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s59
+; GFX8-NOHSA-NEXT:    s_add_u32 s28, s8, 0xf0
+; GFX8-NOHSA-NEXT:    v_bfe_i32 v10, v9, 0, 8
+; GFX8-NOHSA-NEXT:    v_bfe_i32 v14, v8, 0, 8
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s29
+; GFX8-NOHSA-NEXT:    s_addc_u32 s29, s9, 0
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s28
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s58
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s59
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s29
 ; GFX8-NOHSA-NEXT:    s_add_u32 s28, s8, 0xd0
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s29, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s28
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s30
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s31
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s34
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s35
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s29
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s28
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s30
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s31
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s34
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s35
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s29
 ; GFX8-NOHSA-NEXT:    s_add_u32 s28, s8, 0xb0
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s29, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s28
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s36
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s37
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s56
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s57
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s29
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s28
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s36
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s37
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s56
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s57
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s29
 ; GFX8-NOHSA-NEXT:    s_add_u32 s28, s8, 0x90
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s29, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s28
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s38
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s39
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s40
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s41
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s29
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
-; GFX8-NOHSA-NEXT:    v_bfe_i32 v10, v9, 0, 8
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s24
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s28
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s38
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s39
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s40
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s41
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s29
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v20, s6
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s24
 ; GFX8-NOHSA-NEXT:    s_add_u32 s24, s8, 0x70
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s25
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s25
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s25, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s24
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s42
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s43
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s25
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s24
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s42
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s43
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s25
 ; GFX8-NOHSA-NEXT:    s_add_u32 s24, s8, 0x50
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s25, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s24
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s44
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s45
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s46
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s47
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s25
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
-; GFX8-NOHSA-NEXT:    v_bfe_i32 v18, v5, 0, 8
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s18
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s24
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s44
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s45
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s46
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s47
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s25
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
+; GFX8-NOHSA-NEXT:    v_bfe_i32 v22, v4, 0, 8
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s18
 ; GFX8-NOHSA-NEXT:    s_add_u32 s18, s8, 48
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s19
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s19
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s19, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s18
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s26
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s27
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s19
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s18
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s26
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s27
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s19
 ; GFX8-NOHSA-NEXT:    s_add_u32 s18, s8, 16
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s19, s9, 0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s18
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v12, s22
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v13, s23
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v14, s20
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v15, s21
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s19
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[16:17], v[12:15]
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s6
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s18
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v16, s22
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s23
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v18, s20
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v19, s21
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s19
 ; GFX8-NOHSA-NEXT:    s_add_u32 s6, s8, 0xe0
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v17, s7
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v21, s7
+; GFX8-NOHSA-NEXT:    v_bfe_i32 v18, v5, 0, 8
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s9, 0
-; GFX8-NOHSA-NEXT:    v_bfe_i32 v14, v8, 0, 8
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v9, s7
-; GFX8-NOHSA-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v8, s6
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
+; GFX8-NOHSA-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v5, s7
 ; GFX8-NOHSA-NEXT:    s_add_u32 s6, s8, 0xc0
-; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[8:9], v[16:19]
-; GFX8-NOHSA-NEXT:    v_bfe_i32 v22, v1, 0, 8
-; GFX8-NOHSA-NEXT:    v_bfe_i32 v18, v4, 0, 8
+; GFX8-NOHSA-NEXT:    flat_store_dwordx4 v[4:5], v[20:23]
 ; GFX8-NOHSA-NEXT:    s_addc_u32 s7, s9, 0
+; GFX8-NOHSA-NEXT:    v_bfe_i32 v22, v1, 0, 8
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v4, s6
 ; GFX8-NOHSA-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
 ; GFX8-NOHSA-NEXT:    v_mov_b32_e32 v20, s16

diff  --git a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
index 7c80a220b72d7b..6352a98f10a117 100644
--- a/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-global-i16.ll
@@ -6299,13 +6299,13 @@ define amdgpu_kernel void @global_sextload_v8i16_to_v8i64(ptr addrspace(1) %out,
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v7, v3
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v5, 16, v0
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v6, 16, v0
 ; GCN-HSA-NEXT:    v_ashr_i64 v[14:15], v[0:1], 48
 ; GCN-HSA-NEXT:    v_bfe_i32 v12, v1, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v4, v0, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v8, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v6, v5, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[2:3], v[2:3], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v6, v6, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v10, v10, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; GCN-HSA-NEXT:    v_bfe_i32 v0, v7, 0, 16
@@ -6864,16 +6864,16 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v10, v3
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v11, 16, v4
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v13, 16, v2
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v0
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v14, 16, v0
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v8, v0, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[14:15], v[0:1], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v16, v10, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[18:19], v[2:3], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v12, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v2, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v10, v17, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v2, v13, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v12, v2, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[18:19], v[0:1], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v16, v1, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[2:3], v[2:3], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v0, v10, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v20, v4, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v10, v14, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v14, v13, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v11, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v23, v9, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[25:26], v[6:7], 48
@@ -6886,22 +6886,22 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v4, v6, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v6, v1, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
 ; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:48
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:16
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:96
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:64
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:32
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
 ; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
 ; GCN-NOHSA-SI-NEXT:    s_endpgm
 ;
@@ -6957,13 +6957,13 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    v_bfe_i32 v8, v5, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[10:11], v[6:7], 48
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v17, 16, v4
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[14:15], v[8:11]
 ; GCN-HSA-NEXT:    v_bfe_i32 v7, v6, 0, 16
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v4
-; GCN-HSA-NEXT:    v_bfe_i32 v6, v9, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v9, v16, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v4, v4, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GCN-HSA-NEXT:    v_bfe_i32 v6, v17, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[7:10]
@@ -6971,14 +6971,14 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v11, v3
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v16, 16, v2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v17, 16, v0
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[12:13], v[4:7]
 ; GCN-HSA-NEXT:    v_ashr_i64 v[14:15], v[0:1], 48
 ; GCN-HSA-NEXT:    v_bfe_i32 v12, v1, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v8, v0, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v4, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v10, v9, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[2:3], v[2:3], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v10, v17, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v6, v16, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
 ; GCN-HSA-NEXT:    v_bfe_i32 v0, v11, 0, 16
@@ -7003,62 +7003,62 @@ define amdgpu_kernel void @global_sextload_v16i16_to_v16i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
-; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[1:4], off, s[8:11], 0
-; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[5:8], off, s[8:11], 0 offset:16
+; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
+; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[4:7], off, s[8:11], 0 offset:16
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(1)
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v9, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v2
-; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v6
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v8, v0, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v0
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v0, v1, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v1
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v16, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v14, v6, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v12, v2, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v11, 16, v2
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v2, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, v8
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[14:17], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v3, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(0)
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v5
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v4
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v13, v3
 ; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, v4
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v12, 16, v4
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v14, 16, v7
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v8, 16, v8
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v25, v5, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v27, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v10, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v3, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v4, v6, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v6, v12, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v8, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v23, v14, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v21, v7, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v28, 31, v27
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v20, 16, v6
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v22, v7
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v5, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v27, v6, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v4, v4, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v6, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v10, v9, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v14, v11, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v13, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v21, v3, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v23, v22, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v25, v7, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v29, v20, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v28, 31, v27
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[25:28], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:32
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[9:12], off, s[0:3], 0 offset:16
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v30, 31, v29
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[27:30], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[19:22], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
 ; GCN-NOHSA-VI-NEXT:    s_endpgm
 ;
 ; EG-LABEL: global_sextload_v16i16_to_v16i64:
@@ -7980,71 +7980,70 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-SI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt vmcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v18, v3
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, v7
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v23, v11
-; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v17, v15
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v2
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v18, v18, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[20:21], v[2:3], 48
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:240
-; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[20:21], v[0:1], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v18, v1, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:208
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v24, 16, v4
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v22, v3
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v26, v7
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v27, v11
+; GCN-NOHSA-SI-NEXT:    v_mov_b32_e32 v21, v15
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v20, 16, v2
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v16, 16, v4
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v17, 16, v10
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v18, 16, v8
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v19, 16, v14
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v22, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[24:25], v[2:3], 48
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:240
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v18, v22, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[20:21], v[6:7], 48
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:176
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[24:25], v[0:1], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v1, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:208
+; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v3, 16, v12
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[20:21], v[4:5], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v18, v5, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:144
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v22, 16, v10
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v26, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[24:25], v[6:7], 48
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:176
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v18, v23, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[20:21], v[10:11], 48
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:112
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[24:25], v[4:5], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v5, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:144
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[20:21], v[8:9], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v18, v9, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:80
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v9, 16, v8
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v17, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[24:25], v[10:11], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v27, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:112
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[14:15], 48
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:48
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[24:25], v[8:9], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v22, v9, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[22:25], off, s[0:3], 0 offset:80
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[19:20], v[12:13], 48
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v13, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:16
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v7, 16, v14
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[23:24], v[14:15], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v21, v21, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:48
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v16, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v15, v2, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:224
-; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v1, 16, v12
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v3, v1, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashr_i64 v[23:24], v[12:13], 48
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v21, v13, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[21:24], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v1, v12, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v5, v14, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v7, v7, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v9, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v3, v3, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v13, v20, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v2, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
+; GCN-NOHSA-SI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-NOHSA-SI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:224
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v9, v8, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v13, v10, 0, 16
 ; GCN-NOHSA-SI-NEXT:    s_waitcnt expcnt(0)
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v15, v22, 0, 16
-; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v19, v24, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v13, v10, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v7, v19, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v11, v18, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v15, v17, 0, 16
+; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v19, v16, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v17, v4, 0, 16
 ; GCN-NOHSA-SI-NEXT:    v_lshrrev_b32_e32 v2, 16, v6
 ; GCN-NOHSA-SI-NEXT:    v_bfe_i32 v21, v6, 0, 16
@@ -8099,154 +8098,154 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
 ; GCN-HSA-NEXT:    flat_load_dwordx4 v[12:15], v[12:13]
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 48
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
+; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 16
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xf0
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xf0
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xd0
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v27, s3
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v26, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xd0
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xb0
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s1
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s0
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(3)
 ; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[8:9], 48
 ; GCN-HSA-NEXT:    v_bfe_i32 v16, v9, 0, 16
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[16:19]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s2
-; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xb0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s3
+; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s2
+; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x90
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
-; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x90
-; GCN-HSA-NEXT:    v_mov_b32_e32 v9, v11
+; GCN-HSA-NEXT:    s_add_u32 s4, s0, 0x70
 ; GCN-HSA-NEXT:    s_addc_u32 s5, s1, 0
+; GCN-HSA-NEXT:    s_add_u32 s6, s0, 0x50
+; GCN-HSA-NEXT:    s_addc_u32 s7, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, v11
+; GCN-HSA-NEXT:    s_add_u32 s8, s0, 32
 ; GCN-HSA-NEXT:    v_bfe_i32 v16, v9, 0, 16
 ; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[10:11], 48
-; GCN-HSA-NEXT:    s_add_u32 s6, s0, 0x70
+; GCN-HSA-NEXT:    s_addc_u32 s9, s1, 0
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v10
-; GCN-HSA-NEXT:    s_addc_u32 s7, s1, 0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[16:19]
-; GCN-HSA-NEXT:    s_add_u32 s8, s0, 0x50
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s9
 ; GCN-HSA-NEXT:    v_bfe_i32 v18, v9, 0, 16
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v9, 16, v8
 ; GCN-HSA-NEXT:    v_bfe_i32 v16, v10, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v8, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v10, v9, 0, 16
-; GCN-HSA-NEXT:    s_addc_u32 s9, s1, 0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-HSA-NEXT:    s_add_u32 s10, s0, 32
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[8:11]
-; GCN-HSA-NEXT:    s_addc_u32 s11, s1, 0
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s8
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v10, s3
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(5)
-; GCN-HSA-NEXT:    v_ashr_i64 v[10:11], v[0:1], 48
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v1, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[0:1], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v16, v1, 0, 16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, v3
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s11
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[8:11]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s10
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v1, 0, 16
-; GCN-HSA-NEXT:    v_ashr_i64 v[10:11], v[2:3], 48
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v9, s2
+; GCN-HSA-NEXT:    v_bfe_i32 v16, v1, 0, 16
+; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[2:3], 48
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[16:19]
+; GCN-HSA-NEXT:    s_waitcnt vmcnt(6)
+; GCN-HSA-NEXT:    v_mov_b32_e32 v3, v7
+; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[4:5], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v16, v5, 0, 16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[9:10], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v25, s7
+; GCN-HSA-NEXT:    v_bfe_i32 v16, v3, 0, 16
+; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[6:7], 48
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[16:19]
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[26:27], v[8:11]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v19, s5
+; GCN-HSA-NEXT:    v_mov_b32_e32 v24, s6
 ; GCN-HSA-NEXT:    s_waitcnt vmcnt(7)
-; GCN-HSA-NEXT:    v_ashr_i64 v[10:11], v[4:5], 48
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v5, 0, 16
-; GCN-HSA-NEXT:    v_mov_b32_e32 v18, s4
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v3, v7
-; GCN-HSA-NEXT:    v_mov_b32_e32 v17, s3
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[18:19], v[8:11]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v16, s2
-; GCN-HSA-NEXT:    v_bfe_i32 v8, v3, 0, 16
-; GCN-HSA-NEXT:    v_ashr_i64 v[10:11], v[6:7], 48
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s9
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[16:17], v[8:11]
-; GCN-HSA-NEXT:    s_waitcnt vmcnt(8)
-; GCN-HSA-NEXT:    v_bfe_i32 v7, v13, 0, 16
-; GCN-HSA-NEXT:    v_ashr_i64 v[9:10], v[12:13], 48
-; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s8
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[12:13], 48
+; GCN-HSA-NEXT:    v_bfe_i32 v16, v13, 0, 16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v3, v15
-; GCN-HSA-NEXT:    v_mov_b32_e32 v23, s7
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[7:10]
-; GCN-HSA-NEXT:    v_mov_b32_e32 v22, s6
-; GCN-HSA-NEXT:    v_bfe_i32 v7, v3, 0, 16
-; GCN-HSA-NEXT:    v_ashr_i64 v[9:10], v[14:15], 48
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v25, 16, v2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v1, 16, v0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GCN-HSA-NEXT:    v_mov_b32_e32 v21, s5
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[24:25], v[16:19]
+; GCN-HSA-NEXT:    v_mov_b32_e32 v20, s4
+; GCN-HSA-NEXT:    v_bfe_i32 v16, v3, 0, 16
+; GCN-HSA-NEXT:    v_ashr_i64 v[18:19], v[14:15], 48
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v1, 16, v8
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-HSA-NEXT:    v_bfe_i32 v8, v8, 0, 16
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v24, 16, v14
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[20:21], v[16:19]
+; GCN-HSA-NEXT:    v_bfe_i32 v10, v1, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v16, v14, 0, 16
+; GCN-HSA-NEXT:    v_mov_b32_e32 v15, s1
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v26, 16, v2
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v12
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
+; GCN-HSA-NEXT:    v_mov_b32_e32 v14, s0
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xe0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[22:23], v[7:10]
-; GCN-HSA-NEXT:    v_bfe_i32 v19, v0, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v23, v2, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v21, v1, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v25, v25, 0, 16
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v22, 16, v0
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[14:15], v[8:11]
+; GCN-HSA-NEXT:    v_bfe_i32 v14, v18, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v18, v24, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v20, v0, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v24, v2, 0, 16
+; GCN-HSA-NEXT:    v_bfe_i32 v26, v26, 0, 16
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xc0
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[23:26]
+; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v23, 16, v6
+; GCN-HSA-NEXT:    v_bfe_i32 v22, v22, 0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[24:27]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v18, 16, v6
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-HSA-NEXT:    v_bfe_i32 v9, v23, 0, 16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0xa0
-; GCN-HSA-NEXT:    v_bfe_i32 v15, v6, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v17, v18, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[19:22]
+; GCN-HSA-NEXT:    v_bfe_i32 v7, v6, 0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[20:23]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
 ; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v5, 16, v4
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v3, 16, v12
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x80
-; GCN-HSA-NEXT:    v_bfe_i32 v9, v3, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v3, v4, 0, 16
 ; GCN-HSA-NEXT:    v_bfe_i32 v5, v5, 0, 16
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[15:18]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[7:10]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_lshrrev_b32_e32 v13, 16, v14
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
 ; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v6, 31, v5
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s2, s0, 0x60
-; GCN-HSA-NEXT:    v_bfe_i32 v11, v14, 0, 16
-; GCN-HSA-NEXT:    v_bfe_i32 v13, v13, 0, 16
 ; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[3:6]
 ; GCN-HSA-NEXT:    s_addc_u32 s3, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s2
-; GCN-HSA-NEXT:    v_bfe_i32 v7, v12, 0, 16
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s3
 ; GCN-HSA-NEXT:    s_add_u32 s0, s0, 64
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[11:14]
+; GCN-HSA-NEXT:    v_bfe_i32 v12, v12, 0, 16
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[16:19]
 ; GCN-HSA-NEXT:    s_addc_u32 s1, s1, 0
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v0, s0
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
-; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
+; GCN-HSA-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
 ; GCN-HSA-NEXT:    v_mov_b32_e32 v1, s1
-; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[7:10]
+; GCN-HSA-NEXT:    flat_store_dwordx4 v[0:1], v[12:15]
 ; GCN-HSA-NEXT:    s_endpgm
 ;
 ; GCN-NOHSA-VI-LABEL: global_sextload_v32i16_to_v32i64:
@@ -8259,115 +8258,115 @@ define amdgpu_kernel void @global_sextload_v32i16_to_v32i64(ptr addrspace(1) %ou
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt lgkmcnt(0)
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s8, s6
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s9, s7
-; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[7:10], off, s[8:11], 0
-; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[15:18], off, s[8:11], 0 offset:48
-; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[11:14], off, s[8:11], 0 offset:32
-; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[3:6], off, s[8:11], 0 offset:16
+; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[5:8], off, s[8:11], 0
+; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[13:16], off, s[8:11], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[9:12], off, s[8:11], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    buffer_load_dwordx4 v[1:4], off, s[8:11], 0 offset:16
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s0, s4
 ; GCN-NOHSA-VI-NEXT:    s_mov_b32 s1, s5
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(3)
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v0, v8, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v8
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v0, v6, 0, 16
 ; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(2)
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v8, 16, v17
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v21, v8, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v17, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, v18
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v8, 16, v18
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[19:22], off, s[0:3], 0 offset:224
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v8, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v15
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:240
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v8, v7, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v15, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v16
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[17:20], off, s[0:3], 0 offset:192
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v16, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:208
-; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(5)
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v2, v14
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v15, 16, v13
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v15, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v13, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v14, 16, v14
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:160
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v14, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v18, v15, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v15, 16, v15
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v15, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v17, 16, v6
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v6, v16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v16, 16, v16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:224
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v15, 16, v14
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v18, v6, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v16, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:240
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v6, 16, v13
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v18, v13, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v14, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v15, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v11
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:176
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v7, 16, v7
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v2, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v6, 0, 16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:208
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
+; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(4)
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v13, 16, v11
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v13, 0, 16
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v11, 0, 16
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[18:21], off, s[0:3], 0 offset:192
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v19, v12
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v2, 16, v12
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:128
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v12, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v2, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v17, v10
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v18, 16, v10
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v12, 16, v12
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:160
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v19, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v12, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v18, v8
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v16, 16, v8
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(7)
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v10, 16, v5
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v2, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v1, 16, v4
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:144
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v10, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v5, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v10, v7, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v22, v18, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v18, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v1, v6
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v7, 16, v6
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v12, v9, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v9
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
-; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v5, 16, v3
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v24, v1, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v26, v7, 0, 16
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[13:16], off, s[0:3], 0 offset:96
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v20, v17, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v16, v4, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v14, v9, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v4, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v8, 16, v9
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:176
 ; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v6, v5, 0, 16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v25, 31, v24
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v27, 31, v26
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v8, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v9, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v9, 16, v10
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:128
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v9, v9, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v11, v7, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v12, 16, v7
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v7, v10, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[7:10], off, s[0:3], 0 offset:144
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v5, 16, v5
+; GCN-NOHSA-VI-NEXT:    s_waitcnt vmcnt(8)
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v7, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v3
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v9, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v8, 31, v7
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v10, 31, v9
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[7:10], off, s[0:3], 0 offset:96
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v3, 16, v2
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v8, v5, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_mov_b32_e32 v5, v4
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v4, 16, v4
+; GCN-NOHSA-VI-NEXT:    v_lshrrev_b32_e32 v7, 16, v1
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v27, v5, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v29, v4, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v15, v2, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v2, v17, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v19, v18, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v21, v16, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v13, v12, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v23, v1, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v17, v3, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_bfe_i32 v25, v7, 0, 16
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v28, 31, v27
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v30, 31, v29
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v3, 31, v2
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v11, 31, v10
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v21, 31, v20
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v23, 31, v22
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v13, 31, v12
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v15, 31, v14
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v17, 31, v16
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v19, 31, v18
-; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v5, 31, v4
 ; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v7, 31, v6
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[24:27], off, s[0:3], 0 offset:112
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[4:7], off, s[0:3], 0 offset:64
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[16:19], off, s[0:3], 0 offset:80
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[12:15], off, s[0:3], 0 offset:32
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[20:23], off, s[0:3], 0 offset:48
-; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[8:11], off, s[0:3], 0
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v9, 31, v8
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v20, 31, v19
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v22, 31, v21
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v12, 31, v11
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v14, 31, v13
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v16, 31, v15
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v18, 31, v17
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v24, 31, v23
+; GCN-NOHSA-VI-NEXT:    v_ashrrev_i32_e32 v26, 31, v25
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[27:30], off, s[0:3], 0 offset:112
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[23:26], off, s[0:3], 0 offset:64
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[15:18], off, s[0:3], 0 offset:80
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[11:14], off, s[0:3], 0 offset:32
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[19:22], off, s[0:3], 0 offset:48
+; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[6:9], off, s[0:3], 0
 ; GCN-NOHSA-VI-NEXT:    buffer_store_dwordx4 v[0:3], off, s[0:3], 0 offset:16
 ; GCN-NOHSA-VI-NEXT:    s_endpgm
 ;

diff --git a/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll b/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
index 7738ec689dc7eb..2d3c03bbe53179 100644
--- a/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
+++ b/llvm/test/CodeGen/AMDGPU/loop-live-out-copy-undef-subrange.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 -amdgpu-enable-rewrite-partial-reg-uses=false -verify-machineinstrs < %s | FileCheck %s
 
 ; This example used to produce a verifier error resulting from the
 ; register coalescer leaving behind a false live interval when a live

diff --git a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
index cf588601016007..1c75a2fc3dce67 100644
--- a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
+++ b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
@@ -47,7 +47,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_add_u32 s0, s0, s15
 ; CHECK-NEXT:    s_mov_b64 s[34:35], s[6:7]
 ; CHECK-NEXT:    s_addc_u32 s1, s1, 0
-; CHECK-NEXT:    v_mov_b32_e32 v41, v0
+; CHECK-NEXT:    v_mov_b32_e32 v40, v0
 ; CHECK-NEXT:    s_add_u32 s42, s34, 40
 ; CHECK-NEXT:    v_mov_b32_e32 v31, v0
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
@@ -65,7 +65,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_mov_b32_e32 v45, 0
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[6:7]
 ; CHECK-NEXT:    v_mov_b32_e32 v43, v0
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    s_mov_b64 s[4:5], s[38:39]
 ; CHECK-NEXT:    s_mov_b64 s[8:9], s[42:43]
@@ -77,8 +77,8 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_add_u32 s6, s6, _Z12get_local_idj@rel32@lo+4
 ; CHECK-NEXT:    s_addc_u32 s7, s7, _Z12get_local_idj@rel32@hi+12
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[6:7]
-; CHECK-NEXT:    v_mov_b32_e32 v40, v0
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v41, v0
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 1
 ; CHECK-NEXT:    s_mov_b64 s[4:5], s[38:39]
 ; CHECK-NEXT:    s_mov_b64 s[8:9], s[42:43]
@@ -93,7 +93,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[52:53]
 ; CHECK-NEXT:    v_lshrrev_b32_e32 v0, 1, v43
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 2, v43
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    s_mov_b64 s[4:5], s[38:39]
 ; CHECK-NEXT:    s_mov_b64 s[8:9], s[42:43]
 ; CHECK-NEXT:    v_and_b32_e32 v0, 0x7ffffffc, v0
@@ -115,7 +115,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_cmpx_ne_u32_e32 0, v42
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_25
 ; CHECK-NEXT:  ; %bb.1: ; %.preheader5
-; CHECK-NEXT:    v_mul_lo_u32 v0, v40, 14
+; CHECK-NEXT:    v_mul_lo_u32 v0, v41, 14
 ; CHECK-NEXT:    s_mov_b32 s4, 0
 ; CHECK-NEXT:    s_mov_b32 s5, 0
 ; CHECK-NEXT:    v_add_nc_u32_e32 v44, 0x3c04, v0
@@ -184,7 +184,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_cmpx_eq_u16_e64 v56, v0
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_10
 ; CHECK-NEXT:  ; %bb.9: ; in Loop: Header=BB0_8 Depth=2
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0x3c00
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    s_addc_u32 s9, s35, 0
@@ -205,7 +205,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_cmpx_eq_u16_e64 v56, v0
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_12
 ; CHECK-NEXT:  ; %bb.11: ; in Loop: Header=BB0_8 Depth=2
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0x3c00
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    s_addc_u32 s9, s35, 0
@@ -227,7 +227,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_cmpx_eq_u16_e64 v56, v0
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_14
 ; CHECK-NEXT:  ; %bb.13: ; in Loop: Header=BB0_8 Depth=2
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0x3c00
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    s_addc_u32 s9, s35, 0
@@ -249,7 +249,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_cmpx_eq_u16_e64 v56, v0
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_7
 ; CHECK-NEXT:  ; %bb.15: ; in Loop: Header=BB0_8 Depth=2
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0x3c00
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    s_addc_u32 s9, s35, 0
@@ -297,7 +297,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_cmpx_eq_u16_e64 v56, v0
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_19
 ; CHECK-NEXT:  ; %bb.21: ; in Loop: Header=BB0_20 Depth=2
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0x3c00
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    s_addc_u32 s9, s35, 0
@@ -330,7 +330,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_cbranch_execnz .LBB0_5
 ; CHECK-NEXT:  .LBB0_25: ; %Flow49
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s48
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 1
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    s_addc_u32 s9, s35, 0
@@ -344,7 +344,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_mov_b32 s4, exec_lo
 ; CHECK-NEXT:    ds_read_b32 v47, v0 offset:15360
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
-; CHECK-NEXT:    v_cmpx_gt_u32_e64 v47, v40
+; CHECK-NEXT:    v_cmpx_gt_u32_e64 v47, v41
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_33
 ; CHECK-NEXT:  ; %bb.26:
 ; CHECK-NEXT:    s_add_u32 s52, s44, 8
@@ -362,7 +362,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_branch .LBB0_28
 ; CHECK-NEXT:  .LBB0_27: ; in Loop: Header=BB0_28 Depth=1
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s55
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, 0
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    s_addc_u32 s9, s35, 0
@@ -372,13 +372,13 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_mov_b32 s13, s40
 ; CHECK-NEXT:    s_mov_b32 s14, s33
 ; CHECK-NEXT:    s_swappc_b64 s[30:31], s[48:49]
-; CHECK-NEXT:    v_add_co_u32 v40, vcc_lo, v0, v40
-; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v47, v40
+; CHECK-NEXT:    v_add_co_u32 v41, vcc_lo, v0, v41
+; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v47, v41
 ; CHECK-NEXT:    s_or_b32 s54, vcc_lo, s54
 ; CHECK-NEXT:    s_andn2_b32 exec_lo, exec_lo, s54
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_33
 ; CHECK-NEXT:  .LBB0_28: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v40
+; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 2, v41
 ; CHECK-NEXT:    s_mov_b32 s55, exec_lo
 ; CHECK-NEXT:    ds_read_b32 v0, v0
 ; CHECK-NEXT:    s_waitcnt lgkmcnt(0)
@@ -413,7 +413,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 4, v45
 ; CHECK-NEXT:    v_alignbit_b32 v1, v46, v45, 12
 ; CHECK-NEXT:    v_and_b32_e32 v2, 0xf0000, v45
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    s_add_u32 s8, s34, 40
 ; CHECK-NEXT:    v_and_b32_e32 v3, 0xf000, v0
 ; CHECK-NEXT:    v_and_b32_e32 v4, 0xf00, v1
@@ -471,7 +471,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_andn2_saveexec_b32 s4, s4
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_27
 ; CHECK-NEXT:  ; %bb.32: ; in Loop: Header=BB0_28 Depth=1
-; CHECK-NEXT:    v_mov_b32_e32 v31, v41
+; CHECK-NEXT:    v_mov_b32_e32 v31, v40
 ; CHECK-NEXT:    v_mov_b32_e32 v0, v42
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v43
 ; CHECK-NEXT:    v_mov_b32_e32 v2, v44

diff --git a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
index 3a337ec7a8733d..bd2e5cc5952bff 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_64_32.ll
@@ -155,24 +155,24 @@ define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
 ; CI:       ; %bb.0:
 ; CI-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CI-NEXT:    v_mad_u64_u32 v[6:7], s[4:5], v0, v1, 0
-; CI-NEXT:    v_ashrrev_i32_e32 v13, 31, v0
+; CI-NEXT:    v_ashrrev_i32_e32 v12, 31, v0
 ; CI-NEXT:    v_mov_b32_e32 v8, 0
-; CI-NEXT:    v_mad_u64_u32 v[9:10], s[4:5], v13, v1, v[7:8]
-; CI-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
-; CI-NEXT:    v_mad_i64_i32 v[11:12], s[4:5], v1, v13, 0
-; CI-NEXT:    v_mov_b32_e32 v7, v10
+; CI-NEXT:    v_mad_u64_u32 v[9:10], s[4:5], v12, v1, v[7:8]
+; CI-NEXT:    v_ashrrev_i32_e32 v13, 31, v1
+; CI-NEXT:    v_mov_b32_e32 v11, v10
 ; CI-NEXT:    v_mov_b32_e32 v10, v8
-; CI-NEXT:    v_mad_u64_u32 v[8:9], s[4:5], v0, v14, v[9:10]
-; CI-NEXT:    v_mad_i64_i32 v[0:1], s[4:5], v14, v0, v[11:12]
-; CI-NEXT:    v_add_i32_e32 v9, vcc, v7, v9
-; CI-NEXT:    v_addc_u32_e64 v10, s[4:5], 0, 0, vcc
-; CI-NEXT:    v_mad_u64_u32 v[9:10], s[4:5], v13, v14, v[9:10]
-; CI-NEXT:    v_add_i32_e32 v7, vcc, v9, v0
-; CI-NEXT:    v_addc_u32_e32 v9, vcc, v10, v1, vcc
-; CI-NEXT:    v_mov_b32_e32 v1, v8
+; CI-NEXT:    v_mad_u64_u32 v[7:8], s[4:5], v0, v13, v[9:10]
+; CI-NEXT:    v_add_i32_e32 v8, vcc, v11, v8
+; CI-NEXT:    v_mad_i64_i32 v[10:11], s[4:5], v1, v12, 0
+; CI-NEXT:    v_addc_u32_e64 v9, s[4:5], 0, 0, vcc
+; CI-NEXT:    v_mad_u64_u32 v[8:9], s[4:5], v12, v13, v[8:9]
+; CI-NEXT:    v_mad_i64_i32 v[0:1], s[4:5], v13, v0, v[10:11]
+; CI-NEXT:    v_add_i32_e32 v8, vcc, v8, v0
+; CI-NEXT:    v_addc_u32_e32 v9, vcc, v9, v1, vcc
+; CI-NEXT:    v_mov_b32_e32 v1, v7
 ; CI-NEXT:    v_add_i32_e32 v0, vcc, v6, v2
 ; CI-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
-; CI-NEXT:    v_addc_u32_e32 v2, vcc, v7, v4, vcc
+; CI-NEXT:    v_addc_u32_e32 v2, vcc, v8, v4, vcc
 ; CI-NEXT:    v_addc_u32_e32 v3, vcc, v9, v5, vcc
 ; CI-NEXT:    s_setpc_b64 s[30:31]
 ;
@@ -245,26 +245,24 @@ define i128 @mad_i64_i32_sextops_i32_i128(i32 %arg0, i32 %arg1, i128 %arg2) #0 {
 ; GFX11-NEXT:    v_ashrrev_i32_e32 v15, 31, v1
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mad_u64_u32 v[9:10], null, v14, v1, v[7:8]
-; GFX11-NEXT:    v_dual_mov_b32 v7, v10 :: v_dual_mov_b32 v10, v8
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_mad_u64_u32 v[11:12], null, v0, v15, v[9:10]
-; GFX11-NEXT:    v_mad_i64_i32 v[9:10], null, v1, v14, 0
-; GFX11-NEXT:    v_mov_b32_e32 v8, v12
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_mad_i64_i32 v[12:13], null, v15, v0, v[9:10]
-; GFX11-NEXT:    v_add_co_u32 v7, s0, v7, v8
+; GFX11-NEXT:    v_dual_mov_b32 v11, v10 :: v_dual_mov_b32 v10, v8
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v8, null, 0, 0, s0
-; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v14, v15, v[7:8]
-; GFX11-NEXT:    v_mov_b32_e32 v7, v11
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_mad_u64_u32 v[7:8], null, v0, v15, v[9:10]
+; GFX11-NEXT:    v_mov_b32_e32 v10, v8
+; GFX11-NEXT:    v_mad_i64_i32 v[8:9], null, v1, v14, 0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_add_co_u32 v10, s0, v11, v10
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v11, null, 0, 0, s0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT:    v_mad_i64_i32 v[12:13], null, v15, v0, v[8:9]
+; GFX11-NEXT:    v_mad_u64_u32 v[0:1], null, v14, v15, v[10:11]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_add_co_u32 v8, vcc_lo, v0, v12
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v9, vcc_lo, v1, v13, vcc_lo
 ; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, v6, v2
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v7, v3, vcc_lo
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, v8, v4, vcc_lo
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4)
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v9, v5, vcc_lo
 ; GFX11-NEXT:    s_setpc_b64 s[30:31]
   %sext0 = sext i32 %arg0 to i128

diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index 615039500962da..8cf70eaf29e85f 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -69,11 +69,11 @@ define void @issue63986(i64 %0, i64 %idxprom) {
 ; CHECK-NEXT:    v_lshlrev_b64 v[2:3], 6, v[2:3]
 ; CHECK-NEXT:    s_cbranch_execnz .LBB0_9
 ; CHECK-NEXT:  .LBB0_6: ; %loop-memcpy-residual.preheader
-; CHECK-NEXT:    v_mov_b32_e32 v7, s5
-; CHECK-NEXT:    v_or_b32_e32 v2, 32, v4
-; CHECK-NEXT:    v_mov_b32_e32 v3, v5
+; CHECK-NEXT:    v_mov_b32_e32 v2, s4
+; CHECK-NEXT:    v_or_b32_e32 v6, 32, v4
+; CHECK-NEXT:    v_mov_b32_e32 v7, v5
 ; CHECK-NEXT:    s_mov_b64 s[6:7], 0
-; CHECK-NEXT:    v_mov_b32_e32 v6, s4
+; CHECK-NEXT:    v_mov_b32_e32 v3, s5
 ; CHECK-NEXT:  .LBB0_7: ; %loop-memcpy-residual
 ; CHECK-NEXT:    ; =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    s_add_u32 s4, 32, s6
@@ -82,11 +82,11 @@ define void @issue63986(i64 %0, i64 %idxprom) {
 ; CHECK-NEXT:    v_mov_b32_e32 v8, s4
 ; CHECK-NEXT:    flat_load_ubyte v10, v[8:9]
 ; CHECK-NEXT:    v_mov_b32_e32 v9, s7
-; CHECK-NEXT:    v_add_co_u32_e32 v8, vcc, s6, v2
+; CHECK-NEXT:    v_add_co_u32_e32 v8, vcc, s6, v6
 ; CHECK-NEXT:    s_add_u32 s6, s6, 1
-; CHECK-NEXT:    v_addc_co_u32_e32 v9, vcc, v3, v9, vcc
+; CHECK-NEXT:    v_addc_co_u32_e32 v9, vcc, v7, v9, vcc
 ; CHECK-NEXT:    s_addc_u32 s7, s7, 0
-; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[6:7]
+; CHECK-NEXT:    v_cmp_lt_u64_e32 vcc, s[6:7], v[2:3]
 ; CHECK-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
 ; CHECK-NEXT:    flat_store_byte v[8:9], v10
 ; CHECK-NEXT:    s_cbranch_vccnz .LBB0_7

diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index 179b0a855182ce..5e90c33f3c8cb7 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -2448,22 +2448,22 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
 ; VI-NEXT:    v_mad_u64_u32 v[12:13], s[0:1], v4, v2, 0
 ; VI-NEXT:    v_mul_lo_u32 v14, v5, v2
 ; VI-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v0, v4, 0
-; VI-NEXT:    v_mul_lo_u32 v15, v7, v0
-; VI-NEXT:    v_add_u32_e32 v7, vcc, v13, v10
+; VI-NEXT:    v_add_u32_e32 v13, vcc, v13, v10
 ; VI-NEXT:    v_mov_b32_e32 v10, v3
 ; VI-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v1, v4, v[10:11]
-; VI-NEXT:    v_add_u32_e32 v13, vcc, v7, v14
-; VI-NEXT:    v_mov_b32_e32 v7, v4
+; VI-NEXT:    v_add_u32_e32 v13, vcc, v13, v14
+; VI-NEXT:    v_mov_b32_e32 v10, v4
 ; VI-NEXT:    v_mov_b32_e32 v4, v11
+; VI-NEXT:    v_mul_lo_u32 v7, v7, v0
 ; VI-NEXT:    v_mad_u64_u32 v[12:13], s[0:1], v6, v0, v[12:13]
 ; VI-NEXT:    v_mad_u64_u32 v[3:4], s[0:1], v0, v5, v[3:4]
-; VI-NEXT:    v_add_u32_e32 v11, vcc, v15, v13
+; VI-NEXT:    v_add_u32_e32 v13, vcc, v7, v13
 ; VI-NEXT:    v_mov_b32_e32 v0, v4
-; VI-NEXT:    v_mul_lo_u32 v10, v6, v1
-; VI-NEXT:    v_add_u32_e32 v6, vcc, v7, v0
+; VI-NEXT:    v_mul_lo_u32 v11, v6, v1
+; VI-NEXT:    v_add_u32_e32 v6, vcc, v10, v0
 ; VI-NEXT:    v_addc_u32_e64 v7, s[0:1], 0, 0, vcc
 ; VI-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v1, v5, v[6:7]
-; VI-NEXT:    v_add_u32_e32 v5, vcc, v10, v11
+; VI-NEXT:    v_add_u32_e32 v5, vcc, v11, v13
 ; VI-NEXT:    v_add_u32_e32 v4, vcc, v0, v12
 ; VI-NEXT:    v_addc_u32_e32 v5, vcc, v1, v5, vcc
 ; VI-NEXT:    flat_store_dwordx4 v[8:9], v[2:5]
@@ -2483,18 +2483,18 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
 ; GFX9-NEXT:    v_mul_lo_u32 v15, v4, v3
 ; GFX9-NEXT:    v_mad_u64_u32 v[11:12], s[0:1], v1, v4, v[9:10]
 ; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v4, v2, 0
-; GFX9-NEXT:    v_mov_b32_e32 v4, v12
+; GFX9-NEXT:    v_mul_lo_u32 v16, v7, v0
+; GFX9-NEXT:    v_mov_b32_e32 v7, v12
 ; GFX9-NEXT:    v_mov_b32_e32 v12, v10
 ; GFX9-NEXT:    v_mad_u64_u32 v[9:10], s[0:1], v0, v5, v[11:12]
 ; GFX9-NEXT:    v_add3_u32 v3, v3, v15, v14
-; GFX9-NEXT:    v_mul_lo_u32 v17, v7, v0
 ; GFX9-NEXT:    v_mad_u64_u32 v[2:3], s[0:1], v6, v0, v[2:3]
 ; GFX9-NEXT:    v_mov_b32_e32 v0, v10
-; GFX9-NEXT:    v_mul_lo_u32 v16, v6, v1
-; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v4, v0
+; GFX9-NEXT:    v_mul_lo_u32 v4, v6, v1
+; GFX9-NEXT:    v_add_co_u32_e32 v6, vcc, v7, v0
 ; GFX9-NEXT:    v_addc_co_u32_e64 v7, s[0:1], 0, 0, vcc
 ; GFX9-NEXT:    v_mad_u64_u32 v[0:1], s[0:1], v1, v5, v[6:7]
-; GFX9-NEXT:    v_add3_u32 v3, v17, v3, v16
+; GFX9-NEXT:    v_add3_u32 v3, v16, v3, v4
 ; GFX9-NEXT:    v_add_co_u32_e32 v10, vcc, v0, v2
 ; GFX9-NEXT:    v_addc_co_u32_e32 v11, vcc, v1, v3, vcc
 ; GFX9-NEXT:    global_store_dwordx4 v13, v[8:11], s[2:3]
@@ -2503,72 +2503,69 @@ define amdgpu_kernel void @v_mul_i128(ptr addrspace(1) %out, ptr addrspace(1) %a
 ; GFX10-LABEL: v_mul_i128:
 ; GFX10:       ; %bb.0: ; %entry
 ; GFX10-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x2c
-; GFX10-NEXT:    v_lshlrev_b32_e32 v14, 4, v0
+; GFX10-NEXT:    v_lshlrev_b32_e32 v13, 4, v0
 ; GFX10-NEXT:    v_mov_b32_e32 v10, 0
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_clause 0x1
-; GFX10-NEXT:    global_load_dwordx4 v[0:3], v14, s[0:1]
-; GFX10-NEXT:    global_load_dwordx4 v[4:7], v14, s[2:3]
+; GFX10-NEXT:    global_load_dwordx4 v[0:3], v13, s[0:1]
+; GFX10-NEXT:    global_load_dwordx4 v[4:7], v13, s[2:3]
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
 ; GFX10-NEXT:    v_mad_u64_u32 v[8:9], s0, v0, v4, 0
+; GFX10-NEXT:    v_mul_lo_u32 v15, v5, v2
 ; GFX10-NEXT:    v_mul_lo_u32 v7, v7, v0
 ; GFX10-NEXT:    v_mad_u64_u32 v[11:12], s0, v1, v4, v[9:10]
-; GFX10-NEXT:    v_mov_b32_e32 v9, v12
+; GFX10-NEXT:    v_mov_b32_e32 v14, v12
 ; GFX10-NEXT:    v_mov_b32_e32 v12, v10
-; GFX10-NEXT:    v_mul_lo_u32 v10, v5, v2
-; GFX10-NEXT:    v_mad_u64_u32 v[12:13], s0, v0, v5, v[11:12]
+; GFX10-NEXT:    v_mad_u64_u32 v[9:10], s0, v0, v5, v[11:12]
 ; GFX10-NEXT:    v_mul_lo_u32 v11, v4, v3
 ; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s0, v4, v2, 0
-; GFX10-NEXT:    v_mov_b32_e32 v4, v13
-; GFX10-NEXT:    v_mul_lo_u32 v13, v6, v1
-; GFX10-NEXT:    v_add3_u32 v3, v3, v11, v10
-; GFX10-NEXT:    v_add_co_u32 v9, s0, v9, v4
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v10, s0, 0, 0, s0
+; GFX10-NEXT:    v_mul_lo_u32 v12, v6, v1
+; GFX10-NEXT:    v_mov_b32_e32 v4, v10
+; GFX10-NEXT:    v_add3_u32 v3, v3, v11, v15
+; GFX10-NEXT:    v_add_co_u32 v10, s0, v14, v4
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v11, s0, 0, 0, s0
 ; GFX10-NEXT:    v_mad_u64_u32 v[2:3], s0, v6, v0, v[2:3]
-; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, v1, v5, v[9:10]
-; GFX10-NEXT:    v_mov_b32_e32 v9, v12
-; GFX10-NEXT:    v_add3_u32 v3, v7, v3, v13
+; GFX10-NEXT:    v_mad_u64_u32 v[0:1], s0, v1, v5, v[10:11]
+; GFX10-NEXT:    v_add3_u32 v3, v7, v3, v12
 ; GFX10-NEXT:    v_add_co_u32 v10, vcc_lo, v0, v2
 ; GFX10-NEXT:    v_add_co_ci_u32_e32 v11, vcc_lo, v1, v3, vcc_lo
-; GFX10-NEXT:    global_store_dwordx4 v14, v[8:11], s[2:3]
+; GFX10-NEXT:    global_store_dwordx4 v13, v[8:11], s[2:3]
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX11-LABEL: v_mul_i128:
 ; GFX11:       ; %bb.0: ; %entry
 ; GFX11-NEXT:    s_load_b128 s[0:3], s[0:1], 0x2c
-; GFX11-NEXT:    v_lshlrev_b32_e32 v16, 4, v0
-; GFX11-NEXT:    v_mov_b32_e32 v10, 0
+; GFX11-NEXT:    v_dual_mov_b32 v10, 0 :: v_dual_lshlrev_b32 v15, 4, v0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_clause 0x1
-; GFX11-NEXT:    global_load_b128 v[0:3], v16, s[0:1]
-; GFX11-NEXT:    global_load_b128 v[4:7], v16, s[2:3]
+; GFX11-NEXT:    global_load_b128 v[0:3], v15, s[0:1]
+; GFX11-NEXT:    global_load_b128 v[4:7], v15, s[2:3]
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    v_mad_u64_u32 v[8:9], null, v0, v4, 0
-; GFX11-NEXT:    v_mul_lo_u32 v15, v5, v2
+; GFX11-NEXT:    v_mul_lo_u32 v14, v5, v2
 ; GFX11-NEXT:    v_mul_lo_u32 v3, v4, v3
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    v_mad_u64_u32 v[11:12], null, v1, v4, v[9:10]
-; GFX11-NEXT:    v_dual_mov_b32 v9, v12 :: v_dual_mov_b32 v12, v10
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_4)
-; GFX11-NEXT:    v_mad_u64_u32 v[13:14], null, v0, v5, v[11:12]
-; GFX11-NEXT:    v_mad_u64_u32 v[10:11], null, v4, v2, 0
+; GFX11-NEXT:    v_dual_mov_b32 v13, v12 :: v_dual_mov_b32 v12, v10
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_mad_u64_u32 v[9:10], null, v0, v5, v[11:12]
+; GFX11-NEXT:    v_mad_u64_u32 v[11:12], null, v4, v2, 0
 ; GFX11-NEXT:    v_mul_lo_u32 v4, v6, v1
-; GFX11-NEXT:    v_mul_lo_u32 v12, v7, v0
-; GFX11-NEXT:    v_mov_b32_e32 v2, v14
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add3_u32 v11, v11, v3, v15
-; GFX11-NEXT:    v_add_co_u32 v2, s0, v9, v2
-; GFX11-NEXT:    v_mov_b32_e32 v9, v13
+; GFX11-NEXT:    v_mov_b32_e32 v2, v10
+; GFX11-NEXT:    v_mul_lo_u32 v10, v7, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_3)
+; GFX11-NEXT:    v_add3_u32 v12, v12, v3, v14
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v13, v2
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, null, 0, 0, s0
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_mad_u64_u32 v[14:15], null, v6, v0, v[10:11]
+; GFX11-NEXT:    v_mad_u64_u32 v[13:14], null, v6, v0, v[11:12]
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
 ; GFX11-NEXT:    v_mad_u64_u32 v[6:7], null, v1, v5, v[2:3]
+; GFX11-NEXT:    v_add3_u32 v0, v10, v14, v4
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add3_u32 v0, v12, v15, v4
-; GFX11-NEXT:    v_add_co_u32 v10, vcc_lo, v6, v14
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT:    v_add_co_u32 v10, vcc_lo, v6, v13
 ; GFX11-NEXT:    v_add_co_ci_u32_e32 v11, vcc_lo, v7, v0, vcc_lo
-; GFX11-NEXT:    global_store_b128 v16, v[8:11], s[2:3]
+; GFX11-NEXT:    global_store_b128 v15, v[8:11], s[2:3]
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
index 85cd00cbfc5367..8081d40f7e665c 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-constOffset-to-imm.ll
@@ -363,96 +363,96 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX8-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GFX8-NEXT:    v_lshlrev_b32_e32 v1, 17, v0
-; GFX8-NEXT:    v_mov_b32_e32 v2, 3
-; GFX8-NEXT:    v_and_b32_e32 v1, 0xfe000000, v1
-; GFX8-NEXT:    v_lshlrev_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
-; GFX8-NEXT:    v_mov_b32_e32 v2, s35
+; GFX8-NEXT:    v_and_b32_e32 v6, 0xfe000000, v1
+; GFX8-NEXT:    v_mov_b32_e32 v1, 3
+; GFX8-NEXT:    v_lshlrev_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX8-NEXT:    v_or_b32_e32 v0, v6, v0
+; GFX8-NEXT:    v_mov_b32_e32 v1, s35
 ; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s34, v0
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, 0, v2, vcc
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX8-NEXT:    s_movk_i32 s0, 0x5000
-; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s0, v0
-; GFX8-NEXT:    v_mov_b32_e32 v4, 0
-; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GFX8-NEXT:    v_mov_b32_e32 v5, 0
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT:    v_mov_b32_e32 v2, 0
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX8-NEXT:    s_movk_i32 s0, 0x7f
 ; GFX8-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX8-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX8-NEXT:    ; Child Loop BB1_2 Depth 2
-; GFX8-NEXT:    v_mov_b32_e32 v7, v3
-; GFX8-NEXT:    v_mov_b32_e32 v6, v2
+; GFX8-NEXT:    v_mov_b32_e32 v5, v1
+; GFX8-NEXT:    v_mov_b32_e32 v4, v0
 ; GFX8-NEXT:    s_mov_b32 s1, 0
 ; GFX8-NEXT:  .LBB1_2: ; %for.body
 ; GFX8-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX8-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX8-NEXT:    v_add_u32_e32 v8, vcc, 0xffffb000, v6
-; GFX8-NEXT:    v_addc_u32_e32 v9, vcc, -1, v7, vcc
-; GFX8-NEXT:    v_add_u32_e32 v10, vcc, 0xffffb800, v6
-; GFX8-NEXT:    v_addc_u32_e32 v11, vcc, -1, v7, vcc
-; GFX8-NEXT:    v_add_u32_e32 v12, vcc, 0xffffc000, v6
-; GFX8-NEXT:    flat_load_dwordx2 v[8:9], v[8:9]
-; GFX8-NEXT:    flat_load_dwordx2 v[10:11], v[10:11]
-; GFX8-NEXT:    v_addc_u32_e32 v13, vcc, -1, v7, vcc
-; GFX8-NEXT:    v_add_u32_e32 v14, vcc, 0xffffc800, v6
-; GFX8-NEXT:    v_addc_u32_e32 v15, vcc, -1, v7, vcc
-; GFX8-NEXT:    v_add_u32_e32 v16, vcc, 0xffffd000, v6
-; GFX8-NEXT:    flat_load_dwordx2 v[12:13], v[12:13]
-; GFX8-NEXT:    flat_load_dwordx2 v[14:15], v[14:15]
-; GFX8-NEXT:    v_addc_u32_e32 v17, vcc, -1, v7, vcc
-; GFX8-NEXT:    v_add_u32_e32 v18, vcc, 0xffffd800, v6
-; GFX8-NEXT:    v_addc_u32_e32 v19, vcc, -1, v7, vcc
-; GFX8-NEXT:    flat_load_dwordx2 v[16:17], v[16:17]
-; GFX8-NEXT:    flat_load_dwordx2 v[18:19], v[18:19]
-; GFX8-NEXT:    v_add_u32_e32 v20, vcc, 0xffffe000, v6
-; GFX8-NEXT:    v_addc_u32_e32 v21, vcc, -1, v7, vcc
-; GFX8-NEXT:    v_add_u32_e32 v22, vcc, 0xffffe800, v6
-; GFX8-NEXT:    flat_load_dwordx2 v[20:21], v[20:21]
-; GFX8-NEXT:    v_addc_u32_e32 v23, vcc, -1, v7, vcc
-; GFX8-NEXT:    flat_load_dwordx2 v[22:23], v[22:23]
-; GFX8-NEXT:    v_add_u32_e32 v24, vcc, 0xfffff000, v6
-; GFX8-NEXT:    v_addc_u32_e32 v25, vcc, -1, v7, vcc
-; GFX8-NEXT:    flat_load_dwordx2 v[24:25], v[24:25]
-; GFX8-NEXT:    v_add_u32_e32 v26, vcc, 0xfffff800, v6
-; GFX8-NEXT:    v_addc_u32_e32 v27, vcc, -1, v7, vcc
-; GFX8-NEXT:    flat_load_dwordx2 v[26:27], v[26:27]
-; GFX8-NEXT:    flat_load_dwordx2 v[28:29], v[6:7]
-; GFX8-NEXT:    v_add_u32_e32 v6, vcc, 0x10000, v6
-; GFX8-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GFX8-NEXT:    v_add_u32_e32 v7, vcc, 0xffffb000, v4
+; GFX8-NEXT:    v_addc_u32_e32 v8, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v9, vcc, 0xffffb800, v4
+; GFX8-NEXT:    v_addc_u32_e32 v10, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v11, vcc, 0xffffc000, v4
+; GFX8-NEXT:    flat_load_dwordx2 v[7:8], v[7:8]
+; GFX8-NEXT:    flat_load_dwordx2 v[9:10], v[9:10]
+; GFX8-NEXT:    v_addc_u32_e32 v12, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v13, vcc, 0xffffc800, v4
+; GFX8-NEXT:    v_addc_u32_e32 v14, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v15, vcc, 0xffffd000, v4
+; GFX8-NEXT:    flat_load_dwordx2 v[11:12], v[11:12]
+; GFX8-NEXT:    flat_load_dwordx2 v[13:14], v[13:14]
+; GFX8-NEXT:    v_addc_u32_e32 v16, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v17, vcc, 0xffffd800, v4
+; GFX8-NEXT:    v_addc_u32_e32 v18, vcc, -1, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[15:16], v[15:16]
+; GFX8-NEXT:    flat_load_dwordx2 v[17:18], v[17:18]
+; GFX8-NEXT:    v_add_u32_e32 v19, vcc, 0xffffe000, v4
+; GFX8-NEXT:    v_addc_u32_e32 v20, vcc, -1, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v21, vcc, 0xffffe800, v4
+; GFX8-NEXT:    flat_load_dwordx2 v[19:20], v[19:20]
+; GFX8-NEXT:    v_addc_u32_e32 v22, vcc, -1, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[21:22], v[21:22]
+; GFX8-NEXT:    v_add_u32_e32 v23, vcc, 0xfffff000, v4
+; GFX8-NEXT:    v_addc_u32_e32 v24, vcc, -1, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[23:24], v[23:24]
+; GFX8-NEXT:    v_add_u32_e32 v25, vcc, 0xfffff800, v4
+; GFX8-NEXT:    v_addc_u32_e32 v26, vcc, -1, v5, vcc
+; GFX8-NEXT:    flat_load_dwordx2 v[25:26], v[25:26]
+; GFX8-NEXT:    flat_load_dwordx2 v[27:28], v[4:5]
+; GFX8-NEXT:    v_add_u32_e32 v4, vcc, 0x10000, v4
+; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
 ; GFX8-NEXT:    s_addk_i32 s1, 0x2000
 ; GFX8-NEXT:    s_cmp_gt_u32 s1, 0x3fffff
 ; GFX8-NEXT:    s_waitcnt vmcnt(10)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v8, v4
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v9, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v7, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v8, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(9)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v10, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v11, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v9, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v10, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(8)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v12, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v13, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v11, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v12, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(7)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v14, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v15, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v13, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v14, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(6)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v16, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v17, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v15, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v16, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(5)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v18, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v19, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v17, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v18, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(4)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v20, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v21, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v19, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v20, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(3)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v22, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v23, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v21, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v22, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(2)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v24, v0
-; GFX8-NEXT:    v_addc_u32_e32 v4, vcc, v25, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v23, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v24, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(1)
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v26, v0
-; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v27, v4, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v25, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v26, v3, vcc
 ; GFX8-NEXT:    s_waitcnt vmcnt(0)
-; GFX8-NEXT:    v_add_u32_e32 v4, vcc, v28, v0
-; GFX8-NEXT:    v_addc_u32_e32 v5, vcc, v29, v5, vcc
+; GFX8-NEXT:    v_add_u32_e32 v2, vcc, v27, v2
+; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, v28, v3, vcc
 ; GFX8-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX8-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX8-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -463,10 +463,10 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX8-NEXT:    s_mov_b32 s0, s1
 ; GFX8-NEXT:    s_branch .LBB1_1
 ; GFX8-NEXT:  .LBB1_5: ; %while.end
-; GFX8-NEXT:    v_mov_b32_e32 v2, s35
-; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s34, v1
-; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v2, vcc
-; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[4:5]
+; GFX8-NEXT:    v_mov_b32_e32 v1, s35
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s34, v6
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
 ; GFX8-NEXT:    s_endpgm
 ;
 ; GFX900-LABEL: clmem_read:
@@ -491,84 +491,84 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX900-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GFX900-NEXT:    v_and_b32_e32 v1, 0xff, v0
 ; GFX900-NEXT:    v_lshlrev_b32_e32 v0, 17, v0
-; GFX900-NEXT:    v_and_b32_e32 v0, 0xfe000000, v0
-; GFX900-NEXT:    v_lshl_or_b32 v1, v1, 3, v0
-; GFX900-NEXT:    v_mov_b32_e32 v2, s35
-; GFX900-NEXT:    v_add_co_u32_e32 v1, vcc, s34, v1
-; GFX900-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX900-NEXT:    v_and_b32_e32 v6, 0xfe000000, v0
+; GFX900-NEXT:    v_lshl_or_b32 v0, v1, 3, v6
+; GFX900-NEXT:    v_mov_b32_e32 v1, s35
+; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s34, v0
+; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX900-NEXT:    s_movk_i32 s0, 0x5000
-; GFX900-NEXT:    v_add_co_u32_e32 v1, vcc, s0, v1
-; GFX900-NEXT:    v_mov_b32_e32 v3, 0
-; GFX900-NEXT:    v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX900-NEXT:    v_mov_b32_e32 v2, 0
+; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX900-NEXT:    s_movk_i32 s2, 0x7f
-; GFX900-NEXT:    v_mov_b32_e32 v4, 0
+; GFX900-NEXT:    v_mov_b32_e32 v3, 0
 ; GFX900-NEXT:    s_movk_i32 s0, 0xd000
 ; GFX900-NEXT:    s_movk_i32 s1, 0xe000
 ; GFX900-NEXT:    s_movk_i32 s3, 0xf000
 ; GFX900-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX900-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX900-NEXT:    ; Child Loop BB1_2 Depth 2
-; GFX900-NEXT:    v_mov_b32_e32 v6, v2
 ; GFX900-NEXT:    v_mov_b32_e32 v5, v1
+; GFX900-NEXT:    v_mov_b32_e32 v4, v0
 ; GFX900-NEXT:    s_mov_b32 s4, 0
 ; GFX900-NEXT:  .LBB1_2: ; %for.body
 ; GFX900-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX900-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX900-NEXT:    v_add_co_u32_e32 v7, vcc, 0xffffb000, v5
-; GFX900-NEXT:    v_addc_co_u32_e32 v8, vcc, -1, v6, vcc
-; GFX900-NEXT:    global_load_dwordx2 v[9:10], v[5:6], off offset:-4096
-; GFX900-NEXT:    global_load_dwordx2 v[11:12], v[5:6], off offset:-2048
-; GFX900-NEXT:    v_add_co_u32_e32 v13, vcc, 0xffffc000, v5
+; GFX900-NEXT:    v_add_co_u32_e32 v7, vcc, 0xffffb000, v4
+; GFX900-NEXT:    v_addc_co_u32_e32 v8, vcc, -1, v5, vcc
+; GFX900-NEXT:    global_load_dwordx2 v[9:10], v[4:5], off offset:-4096
+; GFX900-NEXT:    global_load_dwordx2 v[11:12], v[4:5], off offset:-2048
+; GFX900-NEXT:    v_add_co_u32_e32 v13, vcc, 0xffffc000, v4
 ; GFX900-NEXT:    global_load_dwordx2 v[7:8], v[7:8], off
-; GFX900-NEXT:    v_addc_co_u32_e32 v14, vcc, -1, v6, vcc
+; GFX900-NEXT:    v_addc_co_u32_e32 v14, vcc, -1, v5, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[17:18], v[13:14], off offset:-2048
-; GFX900-NEXT:    v_add_co_u32_e32 v15, vcc, s0, v5
-; GFX900-NEXT:    v_addc_co_u32_e32 v16, vcc, -1, v6, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v15, vcc, s0, v4
+; GFX900-NEXT:    v_addc_co_u32_e32 v16, vcc, -1, v5, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[15:16], v[15:16], off offset:-2048
-; GFX900-NEXT:    v_add_co_u32_e32 v19, vcc, s1, v5
+; GFX900-NEXT:    v_add_co_u32_e32 v19, vcc, s1, v4
 ; GFX900-NEXT:    global_load_dwordx2 v[13:14], v[13:14], off
-; GFX900-NEXT:    v_addc_co_u32_e32 v20, vcc, -1, v6, vcc
+; GFX900-NEXT:    v_addc_co_u32_e32 v20, vcc, -1, v5, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[23:24], v[19:20], off offset:-4096
 ; GFX900-NEXT:    global_load_dwordx2 v[25:26], v[19:20], off offset:-2048
 ; GFX900-NEXT:    global_load_dwordx2 v[27:28], v[19:20], off
-; GFX900-NEXT:    v_add_co_u32_e32 v21, vcc, s3, v5
-; GFX900-NEXT:    v_addc_co_u32_e32 v22, vcc, -1, v6, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v21, vcc, s3, v4
+; GFX900-NEXT:    v_addc_co_u32_e32 v22, vcc, -1, v5, vcc
 ; GFX900-NEXT:    global_load_dwordx2 v[19:20], v[21:22], off offset:-2048
-; GFX900-NEXT:    global_load_dwordx2 v[29:30], v[5:6], off
-; GFX900-NEXT:    v_add_co_u32_e32 v5, vcc, 0x10000, v5
-; GFX900-NEXT:    v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX900-NEXT:    global_load_dwordx2 v[29:30], v[4:5], off
+; GFX900-NEXT:    v_add_co_u32_e32 v4, vcc, 0x10000, v4
+; GFX900-NEXT:    v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
 ; GFX900-NEXT:    s_addk_i32 s4, 0x2000
 ; GFX900-NEXT:    s_cmp_gt_u32 s4, 0x3fffff
 ; GFX900-NEXT:    s_waitcnt vmcnt(8)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v7, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v8, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v7, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v8, v3, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(7)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v17, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v18, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v17, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v18, v3, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(5)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v13, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v14, v4, vcc
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v15, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v16, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v13, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v14, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v15, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v16, v3, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(4)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v23, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v24, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v23, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v24, v3, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(3)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v25, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v26, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v25, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v26, v3, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(2)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v27, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v28, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v27, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v28, v3, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(1)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v19, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v20, v4, vcc
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v9, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v10, v4, vcc
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v11, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v12, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v19, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v20, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v9, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v10, v3, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v11, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v12, v3, vcc
 ; GFX900-NEXT:    s_waitcnt vmcnt(0)
-; GFX900-NEXT:    v_add_co_u32_e32 v3, vcc, v29, v3
-; GFX900-NEXT:    v_addc_co_u32_e32 v4, vcc, v30, v4, vcc
+; GFX900-NEXT:    v_add_co_u32_e32 v2, vcc, v29, v2
+; GFX900-NEXT:    v_addc_co_u32_e32 v3, vcc, v30, v3, vcc
 ; GFX900-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX900-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX900-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -580,9 +580,9 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX900-NEXT:    s_branch .LBB1_1
 ; GFX900-NEXT:  .LBB1_5: ; %while.end
 ; GFX900-NEXT:    v_mov_b32_e32 v1, s35
-; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s34, v0
+; GFX900-NEXT:    v_add_co_u32_e32 v0, vcc, s34, v6
 ; GFX900-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX900-NEXT:    global_store_dwordx2 v[0:1], v[3:4], off
+; GFX900-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX900-NEXT:    s_endpgm
 ;
 ; GFX10-LABEL: clmem_read:
@@ -606,39 +606,39 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX10-NEXT:    s_swappc_b64 s[30:31], s[4:5]
 ; GFX10-NEXT:    v_lshlrev_b32_e32 v1, 17, v0
-; GFX10-NEXT:    v_and_b32_e32 v2, 0xff, v0
+; GFX10-NEXT:    v_and_b32_e32 v0, 0xff, v0
+; GFX10-NEXT:    v_mov_b32_e32 v2, 0
 ; GFX10-NEXT:    v_mov_b32_e32 v3, 0
-; GFX10-NEXT:    v_mov_b32_e32 v4, 0
 ; GFX10-NEXT:    s_movk_i32 s1, 0x7f
-; GFX10-NEXT:    v_and_b32_e32 v0, 0xfe000000, v1
-; GFX10-NEXT:    v_lshl_or_b32 v1, v2, 3, v0
-; GFX10-NEXT:    v_add_co_u32 v1, s0, v1, s34
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v2, s0, 0, s35, s0
-; GFX10-NEXT:    v_add_co_u32 v1, vcc_lo, 0x5000, v1
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
+; GFX10-NEXT:    v_and_b32_e32 v6, 0xfe000000, v1
+; GFX10-NEXT:    v_lshl_or_b32 v0, v0, 3, v6
+; GFX10-NEXT:    v_add_co_u32 v0, s0, v0, s34
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s0, 0, s35, s0
+; GFX10-NEXT:    v_add_co_u32 v0, vcc_lo, 0x5000, v0
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
 ; GFX10-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX10-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX10-NEXT:    ; Child Loop BB1_2 Depth 2
-; GFX10-NEXT:    v_mov_b32_e32 v6, v2
 ; GFX10-NEXT:    v_mov_b32_e32 v5, v1
+; GFX10-NEXT:    v_mov_b32_e32 v4, v0
 ; GFX10-NEXT:    s_mov_b32 s2, 0
 ; GFX10-NEXT:  .LBB1_2: ; %for.body
 ; GFX10-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX10-NEXT:    ; => This Inner Loop Header: Depth=2
-; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v5, 0xffffb800
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v9, vcc_lo, v5, 0xffffc800
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v13, vcc_lo, v5, 0xffffd800
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v14, vcc_lo, -1, v6, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v17, vcc_lo, v5, 0xffffe800
+; GFX10-NEXT:    v_add_co_u32 v7, vcc_lo, v4, 0xffffb800
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v9, vcc_lo, v4, 0xffffc800
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v13, vcc_lo, v4, 0xffffd800
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v14, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v17, vcc_lo, v4, 0xffffe800
 ; GFX10-NEXT:    s_clause 0x2
 ; GFX10-NEXT:    global_load_dwordx2 v[11:12], v[7:8], off offset:-2048
 ; GFX10-NEXT:    global_load_dwordx2 v[15:16], v[9:10], off offset:-2048
 ; GFX10-NEXT:    global_load_dwordx2 v[19:20], v[13:14], off offset:-2048
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo
-; GFX10-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v5
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v4
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo
 ; GFX10-NEXT:    s_clause 0x7
 ; GFX10-NEXT:    global_load_dwordx2 v[23:24], v[17:18], off offset:-2048
 ; GFX10-NEXT:    global_load_dwordx2 v[7:8], v[7:8], off
@@ -646,42 +646,42 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX10-NEXT:    global_load_dwordx2 v[13:14], v[13:14], off
 ; GFX10-NEXT:    global_load_dwordx2 v[25:26], v[17:18], off
 ; GFX10-NEXT:    global_load_dwordx2 v[27:28], v[21:22], off
-; GFX10-NEXT:    global_load_dwordx2 v[29:30], v[5:6], off offset:-2048
-; GFX10-NEXT:    global_load_dwordx2 v[31:32], v[5:6], off
-; GFX10-NEXT:    v_add_co_u32 v5, vcc_lo, 0x10000, v5
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
+; GFX10-NEXT:    global_load_dwordx2 v[29:30], v[4:5], off offset:-2048
+; GFX10-NEXT:    global_load_dwordx2 v[31:32], v[4:5], off
+; GFX10-NEXT:    v_add_co_u32 v4, vcc_lo, 0x10000, v4
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
 ; GFX10-NEXT:    s_addk_i32 s2, 0x2000
 ; GFX10-NEXT:    s_cmp_gt_u32 s2, 0x3fffff
 ; GFX10-NEXT:    s_waitcnt vmcnt(10)
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v11, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v12, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v11, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v12, v3, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(6)
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v7, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v8, v4, s0
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v15, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v16, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v7, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v8, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v15, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v16, v3, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(5)
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v9, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v10, v4, s0
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v19, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v20, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v9, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v10, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v19, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v20, v3, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(4)
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v13, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v14, v4, s0
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v23, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v24, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v13, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v14, v3, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v23, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v24, v3, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(3)
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v25, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v26, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v25, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v26, v3, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(2)
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v27, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v28, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v27, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v28, v3, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(1)
-; GFX10-NEXT:    v_add_co_u32 v3, s0, v29, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e64 v4, s0, v30, v4, s0
+; GFX10-NEXT:    v_add_co_u32 v2, s0, v29, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e64 v3, s0, v30, v3, s0
 ; GFX10-NEXT:    s_waitcnt vmcnt(0)
-; GFX10-NEXT:    v_add_co_u32 v3, vcc_lo, v31, v3
-; GFX10-NEXT:    v_add_co_ci_u32_e32 v4, vcc_lo, v32, v4, vcc_lo
+; GFX10-NEXT:    v_add_co_u32 v2, vcc_lo, v31, v2
+; GFX10-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v32, v3, vcc_lo
 ; GFX10-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX10-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX10-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -692,9 +692,9 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX10-NEXT:    s_mov_b32 s1, s0
 ; GFX10-NEXT:    s_branch .LBB1_1
 ; GFX10-NEXT:  .LBB1_5: ; %while.end
-; GFX10-NEXT:    v_add_co_u32 v0, s0, s34, v0
+; GFX10-NEXT:    v_add_co_u32 v0, s0, s34, v6
 ; GFX10-NEXT:    v_add_co_ci_u32_e64 v1, s0, s35, 0, s0
-; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[3:4], off
+; GFX10-NEXT:    global_store_dwordx2 v[0:1], v[2:3], off
 ; GFX10-NEXT:    s_endpgm
 ;
 ; GFX90A-LABEL: clmem_read:
@@ -822,101 +822,100 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX11-NEXT:    s_mov_b32 s32, 0
 ; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
 ; GFX11-NEXT:    s_swappc_b64 s[30:31], s[2:3]
-; GFX11-NEXT:    v_lshlrev_b32_e32 v1, 17, v0
-; GFX11-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v2, 0xff, v0
-; GFX11-NEXT:    v_mov_b32_e32 v4, 0
+; GFX11-NEXT:    v_dual_mov_b32 v2, 0 :: v_dual_lshlrev_b32 v1, 17, v0
+; GFX11-NEXT:    v_dual_mov_b32 v3, 0 :: v_dual_and_b32 v0, 0xff, v0
 ; GFX11-NEXT:    s_movk_i32 s1, 0x7f
-; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_and_b32_e32 v0, 0xfe000000, v1
-; GFX11-NEXT:    v_lshl_or_b32 v1, v2, 3, v0
+; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT:    v_and_b32_e32 v6, 0xfe000000, v1
+; GFX11-NEXT:    v_lshl_or_b32 v0, v0, 3, v6
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v1, s0, v1, s34
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v2, null, 0, s35, s0
+; GFX11-NEXT:    v_add_co_u32 v0, s0, v0, s34
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, 0, s35, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v1, vcc_lo, 0x5000, v1
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v2, vcc_lo, 0, v2, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v0, vcc_lo, 0x5000, v0
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v1, vcc_lo
 ; GFX11-NEXT:  .LBB1_1: ; %for.cond.preheader
 ; GFX11-NEXT:    ; =>This Loop Header: Depth=1
 ; GFX11-NEXT:    ; Child Loop BB1_2 Depth 2
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT:    v_dual_mov_b32 v6, v2 :: v_dual_mov_b32 v5, v1
+; GFX11-NEXT:    v_dual_mov_b32 v5, v1 :: v_dual_mov_b32 v4, v0
 ; GFX11-NEXT:    s_mov_b32 s2, 0
 ; GFX11-NEXT:  .LBB1_2: ; %for.body
 ; GFX11-NEXT:    ; Parent Loop BB1_1 Depth=1
 ; GFX11-NEXT:    ; => This Inner Loop Header: Depth=2
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v5, 0xffffc000
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v6, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v9, vcc_lo, 0xffffc000, v5
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v6, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v11, vcc_lo, 0xffffd000, v5
+; GFX11-NEXT:    v_add_co_u32 v7, vcc_lo, v4, 0xffffc000
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, -1, v5, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v9, vcc_lo, 0xffffc000, v4
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v10, vcc_lo, -1, v5, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v11, vcc_lo, 0xffffd000, v4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_load_b64 v[13:14], v[7:8], off offset:-4096
 ; GFX11-NEXT:    global_load_b64 v[9:10], v[9:10], off offset:-2048
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v12, vcc_lo, -1, v6, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v15, vcc_lo, v5, 0xffffe000
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v16, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v12, vcc_lo, -1, v5, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v15, vcc_lo, v4, 0xffffe000
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v16, vcc_lo, -1, v5, vcc_lo
 ; GFX11-NEXT:    global_load_b64 v[11:12], v[11:12], off offset:-2048
-; GFX11-NEXT:    v_add_co_u32 v17, vcc_lo, 0xffffe000, v5
+; GFX11-NEXT:    v_add_co_u32 v17, vcc_lo, 0xffffe000, v4
 ; GFX11-NEXT:    s_clause 0x1
 ; GFX11-NEXT:    global_load_b64 v[19:20], v[15:16], off offset:-4096
 ; GFX11-NEXT:    global_load_b64 v[7:8], v[7:8], off
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v6, vcc_lo
-; GFX11-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v5
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v6, vcc_lo
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v18, vcc_lo, -1, v5, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v21, vcc_lo, 0xfffff000, v4
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v22, vcc_lo, -1, v5, vcc_lo
 ; GFX11-NEXT:    s_clause 0x5
 ; GFX11-NEXT:    global_load_b64 v[17:18], v[17:18], off offset:-2048
 ; GFX11-NEXT:    global_load_b64 v[15:16], v[15:16], off
 ; GFX11-NEXT:    global_load_b64 v[21:22], v[21:22], off offset:-2048
-; GFX11-NEXT:    global_load_b64 v[23:24], v[5:6], off offset:-4096
-; GFX11-NEXT:    global_load_b64 v[25:26], v[5:6], off offset:-2048
-; GFX11-NEXT:    global_load_b64 v[27:28], v[5:6], off
-; GFX11-NEXT:    v_add_co_u32 v5, vcc_lo, 0x10000, v5
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v6, vcc_lo, 0, v6, vcc_lo
+; GFX11-NEXT:    global_load_b64 v[23:24], v[4:5], off offset:-4096
+; GFX11-NEXT:    global_load_b64 v[25:26], v[4:5], off offset:-2048
+; GFX11-NEXT:    global_load_b64 v[27:28], v[4:5], off
+; GFX11-NEXT:    v_add_co_u32 v4, vcc_lo, 0x10000, v4
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v5, vcc_lo, 0, v5, vcc_lo
 ; GFX11-NEXT:    s_addk_i32 s2, 0x2000
 ; GFX11-NEXT:    s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(VALU_DEP_1)
 ; GFX11-NEXT:    s_cmp_gt_u32 s2, 0x3fffff
 ; GFX11-NEXT:    s_waitcnt vmcnt(10)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v13, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v14, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v13, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v14, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(9)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v9, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v10, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v9, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v10, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(6)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v7, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v8, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v7, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v8, v3, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v11, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v12, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v11, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v12, v3, s0
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v19, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v20, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v19, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v20, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(5)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v17, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v18, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v17, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v18, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(4)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v15, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v16, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v15, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v16, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(3)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v21, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v22, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v21, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v22, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(2)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v23, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v24, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v23, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v24, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(1)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
-; GFX11-NEXT:    v_add_co_u32 v3, s0, v25, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e64 v4, s0, v26, v4, s0
+; GFX11-NEXT:    v_add_co_u32 v2, s0, v25, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e64 v3, s0, v26, v3, s0
 ; GFX11-NEXT:    s_waitcnt vmcnt(0)
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
-; GFX11-NEXT:    v_add_co_u32 v3, vcc_lo, v27, v3
-; GFX11-NEXT:    v_add_co_ci_u32_e32 v4, vcc_lo, v28, v4, vcc_lo
+; GFX11-NEXT:    v_add_co_u32 v2, vcc_lo, v27, v2
+; GFX11-NEXT:    v_add_co_ci_u32_e32 v3, vcc_lo, v28, v3, vcc_lo
 ; GFX11-NEXT:    s_cbranch_scc0 .LBB1_2
 ; GFX11-NEXT:  ; %bb.3: ; %while.cond.loopexit
 ; GFX11-NEXT:    ; in Loop: Header=BB1_1 Depth=1
@@ -927,10 +926,10 @@ define hidden amdgpu_kernel void @clmem_read(ptr addrspace(1)  %buffer) {
 ; GFX11-NEXT:    s_mov_b32 s1, s0
 ; GFX11-NEXT:    s_branch .LBB1_1
 ; GFX11-NEXT:  .LBB1_5: ; %while.end
-; GFX11-NEXT:    v_add_co_u32 v0, s0, s34, v0
+; GFX11-NEXT:    v_add_co_u32 v0, s0, s34, v6
 ; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; GFX11-NEXT:    v_add_co_ci_u32_e64 v1, null, s35, 0, s0
-; GFX11-NEXT:    global_store_b64 v[0:1], v[3:4], off
+; GFX11-NEXT:    global_store_b64 v[0:1], v[2:3], off
 ; GFX11-NEXT:    s_nop 0
 ; GFX11-NEXT:    s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
 ; GFX11-NEXT:    s_endpgm

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index adc4c85aa60dc0..c48370a9c6c75b 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -164,33 +164,33 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s12
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s13
-; GCN-IR-NEXT:    s_min_u32 s18, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s16, s14, s18
+; GCN-IR-NEXT:    s_min_u32 s20, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s16, s14, s20
 ; GCN-IR-NEXT:    s_subb_u32 s17, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[16:17], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[16:17], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[16:17], 63
-; GCN-IR-NEXT:    s_or_b64 s[20:21], s[10:11], s[20:21]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[20:21], exec
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s13
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s12
-; GCN-IR-NEXT:    s_or_b64 s[20:21], s[20:21], s[22:23]
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[22:23]
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s20, s16, 1
-; GCN-IR-NEXT:    s_addc_u32 s21, s17, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[20:21], 0
+; GCN-IR-NEXT:    s_add_u32 s18, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s19, s17, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
 ; GCN-IR-NEXT:    s_sub_i32 s16, 63, s16
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s16
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s20
-; GCN-IR-NEXT:    s_add_u32 s19, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s20, s7, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s18
+; GCN-IR-NEXT:    s_add_u32 s18, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s19, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s18
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
 ; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
@@ -201,8 +201,8 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s8, s19, s16
-; GCN-IR-NEXT:    s_subb_u32 s8, s20, s17
+; GCN-IR-NEXT:    s_sub_u32 s8, s18, s16
+; GCN-IR-NEXT:    s_subb_u32 s8, s19, s17
 ; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
 ; GCN-IR-NEXT:    s_mov_b32 s15, s14
 ; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
@@ -211,9 +211,9 @@ define amdgpu_kernel void @s_test_sdiv(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
 ; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
 ; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[12:13], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[22:23]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
@@ -362,85 +362,85 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_sdiv:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v0
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v5, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v4, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v10, vcc, v0, v4
-; GCN-IR-NEXT:    v_subb_u32_e32 v11, vcc, v1, v4, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v5, v2
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v5
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v12, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v13, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v12, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v7, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v2
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v13, v3
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v13
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v12, v2, v3
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v10
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v6
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[6:7], 32, v2
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v11
-; GCN-IR-NEXT:    v_min_u32_e32 v13, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v12, v13
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v7
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[6:7]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[6:7], 0, 0, s[6:7]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, v5
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v9, v11, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v8, v10, 0, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v14, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, v13
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v6, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[14:15]
-; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[10:11], v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[6:7], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v9, v12
-; GCN-IR-NEXT:    v_lshr_b64 v[14:15], v[10:11], v14
-; GCN-IR-NEXT:    v_not_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, v9, v13
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v8, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v5, v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[6:7], v8
+; GCN-IR-NEXT:    v_not_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v5, v11
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v4, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[14:15], v[14:15], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v8, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v14, v14, v8
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v16, v14
-; GCN-IR-NEXT:    v_subb_u32_e32 v8, vcc, v17, v15, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v12, v2
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v13, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GCN-IR-NEXT:    v_sub_i32_e64 v14, s[4:5], v14, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v15, s[4:5], v15, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v9
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v16, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v17, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
@@ -448,14 +448,14 @@ define i64 @v_test_sdiv(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v9, v9, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v5, v4
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v7, v6
-; GCN-IR-NEXT:    v_xor_b32_e32 v3, v8, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v2, v9, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v13, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v15, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v4, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v5, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v3, v0
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
@@ -1001,33 +1001,33 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s12
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s13
-; GCN-IR-NEXT:    s_min_u32 s18, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s16, s14, s18
+; GCN-IR-NEXT:    s_min_u32 s20, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s16, s14, s20
 ; GCN-IR-NEXT:    s_subb_u32 s17, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[20:21], s[16:17], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[16:17], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[16:17], 63
-; GCN-IR-NEXT:    s_or_b64 s[20:21], s[10:11], s[20:21]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[20:21], exec
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s13
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s12
-; GCN-IR-NEXT:    s_or_b64 s[20:21], s[20:21], s[22:23]
+; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[22:23]
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[20:21]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s20, s16, 1
-; GCN-IR-NEXT:    s_addc_u32 s21, s17, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[20:21], 0
+; GCN-IR-NEXT:    s_add_u32 s18, s16, 1
+; GCN-IR-NEXT:    s_addc_u32 s19, s17, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
 ; GCN-IR-NEXT:    s_sub_i32 s16, 63, s16
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[12:13], s16
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s20
-; GCN-IR-NEXT:    s_add_u32 s19, s6, -1
-; GCN-IR-NEXT:    s_addc_u32 s20, s7, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[16:17], s[12:13], s18
+; GCN-IR-NEXT:    s_add_u32 s18, s6, -1
+; GCN-IR-NEXT:    s_addc_u32 s19, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s18
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
 ; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
@@ -1038,8 +1038,8 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[10:11], 1
 ; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[8:9]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], s[14:15], s[10:11]
-; GCN-IR-NEXT:    s_sub_u32 s8, s19, s16
-; GCN-IR-NEXT:    s_subb_u32 s8, s20, s17
+; GCN-IR-NEXT:    s_sub_u32 s8, s18, s16
+; GCN-IR-NEXT:    s_subb_u32 s8, s19, s17
 ; GCN-IR-NEXT:    s_ashr_i32 s14, s8, 31
 ; GCN-IR-NEXT:    s_mov_b32 s15, s14
 ; GCN-IR-NEXT:    s_and_b32 s8, s14, 1
@@ -1048,9 +1048,9 @@ define amdgpu_kernel void @s_test_sdiv24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_subb_u32 s17, s17, s15
 ; GCN-IR-NEXT:    s_add_u32 s12, s12, 1
 ; GCN-IR-NEXT:    s_addc_u32 s13, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[22:23], s[12:13], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], s[8:9]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[22:23]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[20:21]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_3
 ; GCN-IR-NEXT:  .LBB9_4: ; %Flow4
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[10:11], 1
@@ -1206,32 +1206,32 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s10, s2
 ; GCN-IR-NEXT:    s_add_i32 s10, s10, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s11, s3
-; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_add_u32 s12, s10, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s13, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s14, s10, s11
+; GCN-IR-NEXT:    s_add_u32 s10, s14, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[10:11], 63
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[8:9], s[12:13]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[12:13], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
+; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
-; GCN-IR-NEXT:    s_sub_i32 s11, 63, s12
+; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
+; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[12:13], 0
+; GCN-IR-NEXT:    s_sub_i32 s10, 63, s10
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s11
+; GCN-IR-NEXT:    s_lshl_b64 s[8:9], 24, s10
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s14
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], 24, s12
 ; GCN-IR-NEXT:    s_add_u32 s16, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s10, 58, s10
+; GCN-IR-NEXT:    s_sub_u32 s10, 58, s14
 ; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -1385,87 +1385,87 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 32, v4
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v12, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v12, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v5, vcc, s6, v8
-; GCN-IR-NEXT:    v_addc_u32_e64 v6, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[5:6]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[5:6]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, 24, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v6, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v5
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], 24, v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], 24, v9
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 58, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB11_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB11_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v7, v6, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v7, v2
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v4, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 24, %x
   ret i64 %result
@@ -1578,89 +1578,89 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_num_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 32, v4
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v12, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v12, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v5, vcc, s6, v8
-; GCN-IR-NEXT:    v_addc_u32_e64 v6, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[5:6]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[5:6]
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0x8000
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v7, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
-; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v6, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], s[4:5], v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[8:9], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[8:9]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], s[4:5], v9
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, 47, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], 0, 0, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v14, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v15, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v1
-; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v0
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v14, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, v15, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v1
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v7, v1
-; GCN-IR-NEXT:    v_or_b32_e32 v7, v6, v0
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v0
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v7, v2
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v4, v3
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v4, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v5, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 32768, %x
   ret i64 %result
@@ -1680,84 +1680,84 @@ define i64 @v_test_sdiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_sdiv_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v0
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v2, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v7, vcc, v0, v2
-; GCN-IR-NEXT:    v_subb_u32_e32 v8, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v7
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v10, v0
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v10, v1
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, v1, v10, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e64 v0, s[4:5], 32, v0
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v8
-; GCN-IR-NEXT:    v_min_u32_e32 v0, v0, v1
-; GCN-IR-NEXT:    v_sub_i32_e64 v3, s[4:5], 48, v0
-; GCN-IR-NEXT:    v_subb_u32_e64 v4, s[4:5], 0, 0, s[4:5]
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[7:8]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[3:4]
-; GCN-IR-NEXT:    v_mov_b32_e32 v1, v2
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v1, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v0, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 48, v8
+; GCN-IR-NEXT:    v_subb_u32_e64 v1, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[0:1]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v10
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[3:4]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[0:1]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v8, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v7, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v3, v5, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v2, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v4, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v3, s[4:5], 63, v3
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
-; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[7:8], v3
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v0
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v1, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v0, s[4:5], 63, v0
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[4:5], v0
+; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[9:10], v[7:8], v9
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 0xffffffcf, v0
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v8, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[4:5], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v3, 0
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[9:10], v[9:10], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v0, 31, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v0, v9, v0
-; GCN-IR-NEXT:    v_sub_i32_e32 v5, vcc, s12, v0
-; GCN-IR-NEXT:    v_subb_u32_e32 v5, vcc, 0, v10, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v7
-; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[3:4], 1
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v9, 31, v5
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v8, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v5, 1, v9
-; GCN-IR-NEXT:    v_and_b32_e32 v9, 0x8000, v9
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[7:8]
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v9, s[4:5], v0, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v10, s[4:5], 0, v10, s[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v2, 31, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v2
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, s12, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
+; GCN-IR-NEXT:    v_subb_u32_e32 v2, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 1, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v0, v8, v0
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v2, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[4:5]
+; GCN-IR-NEXT:    v_or_b32_e32 v1, v9, v1
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v3
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v2
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[3:4], v[3:4], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
+; GCN-IR-NEXT:    v_lshl_b64 v[0:1], v[0:1], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v3, v1
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v2, v0
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v5, v2
-; GCN-IR-NEXT:    v_xor_b32_e32 v3, v6, v1
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v3, v1, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v2, v10
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v3, v11
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v11, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = sdiv i64 %x, 32768
   ret i64 %result

diff --git a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
index b3b6319d70878d..9bac6bbd975957 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
@@ -1,5 +1,5 @@
 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -amdgpu-dce-in-ra=0 -stress-regalloc=1 -start-before=register-coalescer -stop-after=greedy,1 -o - %s | FileCheck %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -verify-machineinstrs -amdgpu-enable-rewrite-partial-reg-uses=false -amdgpu-dce-in-ra=0 -stress-regalloc=1 -start-before=register-coalescer -stop-after=greedy,1 -o - %s | FileCheck %s
 # https://bugs.llvm.org/show_bug.cgi?id=33620
 
 ---

diff --git a/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll b/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
index 314785cdbefd61..f1db395b351824 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-vgpr.ll
@@ -136,13 +136,13 @@ define amdgpu_kernel void @max_256_vgprs_spill_9x32(ptr addrspace(1) %p) #1 {
 ; GFX908-DAG: v_accvgpr_read_b32
 
 ; GFX900: NumVgprs: 256
-; GFX908: NumVgprs: 254
-; GFX900: ScratchSize: 1796
+; GFX908: NumVgprs: 252
+; GFX900: ScratchSize: 132
 ; GFX908: ScratchSize: 0
 ; GFX900: VGPRBlocks: 63
-; GFX908: VGPRBlocks: 63
+; GFX908: VGPRBlocks: 62
 ; GFX900: NumVGPRsForWavesPerEU: 256
-; GFX908: NumVGPRsForWavesPerEU: 254
+; GFX908: NumVGPRsForWavesPerEU: 252
 define amdgpu_kernel void @max_256_vgprs_spill_9x32_2bb(ptr addrspace(1) %p) #1 {
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
   %p1 = getelementptr inbounds <32 x float>, ptr addrspace(1) %p, i32 %tid

diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index bfb409438757c7..ac212d22e9cfa7 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -135,34 +135,34 @@ define amdgpu_kernel void @s_test_srem(ptr addrspace(1) %out, i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s14, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
+; GCN-IR-NEXT:    s_min_u32 s18, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[8:9], s[16:17]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[16:17], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s16
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
 ; GCN-IR-NEXT:    s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s5, -1
 ; GCN-IR-NEXT:    s_not_b64 s[6:7], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s6, s14
+; GCN-IR-NEXT:    s_add_u32 s10, s6, s18
 ; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -339,107 +339,107 @@ define i64 @v_test_srem(i64 %x, i64 %y) {
 ; GCN-IR-LABEL: v_test_srem:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v6, 31, v3
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v4
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
-; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v4, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v3, vcc, v3, v6, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v2
-; GCN-IR-NEXT:    v_add_i32_e64 v6, s[6:7], 32, v6
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v7, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v10, v6, v7
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v6, v0
-; GCN-IR-NEXT:    v_add_i32_e64 v6, s[6:7], 32, v6
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v7, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v11, v6, v7
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v10, v11
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v4, 31, v3
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v14
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v2, v2, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v14, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v3, v3, v4
+; GCN-IR-NEXT:    v_sub_i32_e32 v2, vcc, v2, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v3, vcc, v3, v4, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
+; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
+; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[6:7]
+; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[6:7], 63, v[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[6:7]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, v4
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v9, v1, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v8, v0, 0, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v15, v14
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, 0, v7, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[0:1], v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v16, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v17, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v9, v10
-; GCN-IR-NEXT:    v_lshr_b64 v[12:13], v[0:1], v12
-; GCN-IR-NEXT:    v_not_b32_e32 v8, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, v9, v11
-; GCN-IR-NEXT:    v_mov_b32_e32 v14, 0
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v8, vcc
+; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
+; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[12:13], v[12:13], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v8, 31, v7
-; GCN-IR-NEXT:    v_or_b32_e32 v12, v12, v8
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v8, vcc, v16, v12
-; GCN-IR-NEXT:    v_subb_u32_e32 v8, vcc, v17, v13, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v14, v6
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v14, 31, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v10
-; GCN-IR-NEXT:    v_or_b32_e32 v7, v15, v7
-; GCN-IR-NEXT:    v_and_b32_e32 v8, 1, v14
-; GCN-IR-NEXT:    v_and_b32_e32 v15, v14, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v14, v14, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v11, vcc
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[10:11]
-; GCN-IR-NEXT:    v_sub_i32_e64 v12, s[4:5], v12, v14
-; GCN-IR-NEXT:    v_subb_u32_e64 v13, s[4:5], v13, v15, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v15, v9
+; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v16, v10
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v17, v11, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
+; GCN-IR-NEXT:    v_and_b32_e32 v13, v12, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v12, v12, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
+; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
+; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v13, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v14, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB1_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v9, v9, v7
-; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:  .LBB1_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v6, v2, v9
-; GCN-IR-NEXT:    v_mul_hi_u32 v7, v2, v8
-; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v8
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, v8
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v6, v3
+; GCN-IR-NEXT:    v_mul_lo_u32 v4, v2, v7
+; GCN-IR-NEXT:    v_mul_hi_u32 v5, v2, v6
+; GCN-IR-NEXT:    v_mul_lo_u32 v3, v3, v6
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v2, v6
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v4
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v5
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v14
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v15
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v14
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v15, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, %y
   ret i64 %result
@@ -1037,33 +1037,33 @@ define amdgpu_kernel void @s_test_srem33_64(ptr addrspace(1) %out, i64 %x, i64 %
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
-; GCN-IR-NEXT:    s_min_u32 s16, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s16
+; GCN-IR-NEXT:    s_min_u32 s20, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s20
 ; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[14:15], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s18, s14, 1
-; GCN-IR-NEXT:    s_addc_u32 s19, s15, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s15, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
 ; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[2:3], s14
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s18
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[2:3], s16
 ; GCN-IR-NEXT:    s_add_u32 s18, s8, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s9, -1
 ; GCN-IR-NEXT:    s_not_b64 s[6:7], s[12:13]
-; GCN-IR-NEXT:    s_add_u32 s12, s6, s16
+; GCN-IR-NEXT:    s_add_u32 s12, s6, s20
 ; GCN-IR-NEXT:    s_addc_u32 s13, s7, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -1188,33 +1188,33 @@ define amdgpu_kernel void @s_test_srem24_48(ptr addrspace(1) %out, i48 %x, i48 %
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s4
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s5
-; GCN-IR-NEXT:    s_min_u32 s16, s8, s9
-; GCN-IR-NEXT:    s_sub_u32 s14, s12, s16
+; GCN-IR-NEXT:    s_min_u32 s20, s8, s9
+; GCN-IR-NEXT:    s_sub_u32 s14, s12, s20
 ; GCN-IR-NEXT:    s_subb_u32 s15, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[18:19], s[14:15], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[20:21], s[14:15], 63
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[10:11], s[18:19]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[18:19], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[14:15], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[14:15], 63
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[10:11], s[16:17]
+; GCN-IR-NEXT:    s_and_b64 s[10:11], s[16:17], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s11, 0, s5
 ; GCN-IR-NEXT:    s_cselect_b32 s10, 0, s4
-; GCN-IR-NEXT:    s_or_b64 s[18:19], s[18:19], s[20:21]
+; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[8:9], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s18, s14, 1
-; GCN-IR-NEXT:    s_addc_u32 s19, s15, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[18:19], 0
+; GCN-IR-NEXT:    s_add_u32 s16, s14, 1
+; GCN-IR-NEXT:    s_addc_u32 s17, s15, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[16:17], 0
 ; GCN-IR-NEXT:    s_sub_i32 s14, 63, s14
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[10:11], s[4:5], s14
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB9_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[4:5], s18
+; GCN-IR-NEXT:    s_lshr_b64 s[14:15], s[4:5], s16
 ; GCN-IR-NEXT:    s_add_u32 s18, s6, -1
 ; GCN-IR-NEXT:    s_addc_u32 s19, s7, -1
 ; GCN-IR-NEXT:    s_not_b64 s[8:9], s[12:13]
-; GCN-IR-NEXT:    s_add_u32 s12, s8, s16
+; GCN-IR-NEXT:    s_add_u32 s12, s8, s20
 ; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[16:17], 0
 ; GCN-IR-NEXT:    s_mov_b32 s9, 0
@@ -1396,32 +1396,32 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s2, s4
 ; GCN-IR-NEXT:    s_add_i32 s2, s2, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s3, s5
-; GCN-IR-NEXT:    s_min_u32 s8, s2, s3
-; GCN-IR-NEXT:    s_add_u32 s2, s8, 0xffffffc5
+; GCN-IR-NEXT:    s_min_u32 s12, s2, s3
+; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc5
 ; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[4:5], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[2:3], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[4:5], 0
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[2:3], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[2:3], 63
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[10:11], s[12:13]
-; GCN-IR-NEXT:    s_and_b64 s[10:11], s[12:13], exec
-; GCN-IR-NEXT:    s_cselect_b32 s10, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
-; GCN-IR-NEXT:    s_mov_b32 s11, 0
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[8:9], s[10:11]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[10:11], exec
+; GCN-IR-NEXT:    s_cselect_b32 s8, 0, 24
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT:    s_mov_b32 s9, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s10, s2, 1
-; GCN-IR-NEXT:    s_addc_u32 s11, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT:    s_add_u32 s8, s2, 1
+; GCN-IR-NEXT:    s_addc_u32 s9, s3, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
 ; GCN-IR-NEXT:    s_sub_i32 s2, 63, s2
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], 24, s2
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s8
 ; GCN-IR-NEXT:    s_add_u32 s14, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s5, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -1448,13 +1448,13 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB10_3
 ; GCN-IR-NEXT:  .LBB10_4: ; %Flow6
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[2:3], 1
-; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[2:3]
+; GCN-IR-NEXT:    s_or_b64 s[8:9], s[6:7], s[2:3]
 ; GCN-IR-NEXT:  .LBB10_5: ; %udiv-end
-; GCN-IR-NEXT:    v_mov_b32_e32 v0, s10
+; GCN-IR-NEXT:    v_mov_b32_e32 v0, s8
 ; GCN-IR-NEXT:    v_mul_hi_u32 v0, s4, v0
-; GCN-IR-NEXT:    s_mul_i32 s6, s4, s11
-; GCN-IR-NEXT:    s_mul_i32 s5, s5, s10
-; GCN-IR-NEXT:    s_mul_i32 s4, s4, s10
+; GCN-IR-NEXT:    s_mul_i32 s6, s4, s9
+; GCN-IR-NEXT:    s_mul_i32 s5, s5, s8
+; GCN-IR-NEXT:    s_mul_i32 s4, s4, s8
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, s6, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, s5, v0
 ; GCN-IR-NEXT:    v_sub_i32_e64 v0, vcc, 24, s4
@@ -1582,25 +1582,25 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffc5
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, s6, v6
-; GCN-IR-NEXT:    v_addc_u32_e64 v4, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[3:4]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[3:4]
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[2:3]
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, 24, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, 24, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB11_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v4, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v3
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], 24, v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1610,8 +1610,8 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v7
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], 24, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 58, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1645,15 +1645,15 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB11_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v7
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v4, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT:  .LBB11_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v2
-; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v5
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v5
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v5
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v5
+; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
@@ -1773,27 +1773,27 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
 ; GCN-IR-NEXT:    s_movk_i32 s6, 0xffd0
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, s6, v6
-; GCN-IR-NEXT:    v_addc_u32_e64 v4, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, s6, v10
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[3:4]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[3:4]
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0x8000
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v5, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1803,8 +1803,8 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v7
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1838,15 +1838,15 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB12_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v7
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v4, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT:  .LBB12_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v2
-; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v5
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v5
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v5
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v5
+; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
@@ -1872,87 +1872,87 @@ define i64 @v_test_srem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-LABEL: v_test_srem_pow2_k_den_i64:
 ; GCN-IR:       ; %bb.0: ; %_udiv-special-cases
 ; GCN-IR-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v2, 31, v1
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v2
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v2, vcc
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v0
-; GCN-IR-NEXT:    v_add_i32_e64 v3, s[4:5], 32, v3
-; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v3, v4
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v8
-; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v1
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v12
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v12, vcc
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
+; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
+; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v3, v2
+; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v13, v12
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], vcc, s[4:5]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[4:5]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 63, v[2:3]
 ; GCN-IR-NEXT:    s_xor_b64 s[6:7], s[4:5], -1
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v7, v1, 0, s[4:5]
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v6, v0, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v1, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v0, 0, s[4:5]
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[6:7], vcc
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v9, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v10, vcc, 0, v5, vcc
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[9:10]
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
-; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[4:5], vcc
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB13_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v9
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 0xffffffcf, v8
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
-; GCN-IR-NEXT:    v_addc_u32_e64 v9, s[4:5], 0, -1, vcc
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
-; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB13_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
-; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, s12, v10
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, 0, v11, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v8
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v12, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v12, 31, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v9, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v12
-; GCN-IR-NEXT:    v_and_b32_e32 v12, 0x8000, v12
-; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[8:9]
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v13, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v12
-; GCN-IR-NEXT:    v_mov_b32_e32 v13, v7
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v11, s[4:5], 0, v11, s[4:5]
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
+; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v4
+; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v8
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v9, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v10, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v10, 0x8000, v10
+; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[6:7]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v11, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v9, s[4:5], 0, v9, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v12, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB13_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB13_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v7, v7, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT:  .LBB13_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[6:7], 15
-; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v4
-; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v5, vcc
-; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v2
-; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v3
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[4:5], 15
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v2
 ; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v3, vcc
+; GCN-IR-NEXT:    v_xor_b32_e32 v0, v0, v12
+; GCN-IR-NEXT:    v_xor_b32_e32 v1, v1, v13
+; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, v0, v12
+; GCN-IR-NEXT:    v_subb_u32_e32 v1, vcc, v1, v13, vcc
 ; GCN-IR-NEXT:    s_setpc_b64 s[30:31]
   %result = srem i64 %x, 32768
   ret i64 %result

diff  --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index 6102d652402a21..9301170c034d89 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -136,34 +136,34 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s14, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
+; GCN-IR-NEXT:    s_min_u32 s16, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[8:9], s[16:17]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[16:17], exec
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s16
-; GCN-IR-NEXT:    s_add_u32 s15, s4, -1
-; GCN-IR-NEXT:    s_addc_u32 s16, s5, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
+; GCN-IR-NEXT:    s_add_u32 s14, s4, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s5, -1
 ; GCN-IR-NEXT:    s_not_b64 s[2:3], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s2, s2, s14
+; GCN-IR-NEXT:    s_add_u32 s2, s2, s16
 ; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -174,8 +174,8 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[8:9], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[6:7]
 ; GCN-IR-NEXT:    s_or_b64 s[8:9], s[10:11], s[8:9]
-; GCN-IR-NEXT:    s_sub_u32 s6, s15, s12
-; GCN-IR-NEXT:    s_subb_u32 s6, s16, s13
+; GCN-IR-NEXT:    s_sub_u32 s6, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s6, s15, s13
 ; GCN-IR-NEXT:    s_ashr_i32 s10, s6, 31
 ; GCN-IR-NEXT:    s_mov_b32 s11, s10
 ; GCN-IR-NEXT:    s_and_b32 s6, s10, 1
@@ -184,9 +184,9 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
 ; GCN-IR-NEXT:    s_add_u32 s2, s2, 1
 ; GCN-IR-NEXT:    s_addc_u32 s3, s3, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[2:3], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[2:3], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_3
 ; GCN-IR-NEXT:  .LBB0_4: ; %Flow7
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[8:9], 1
@@ -319,12 +319,12 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v8, v9
+; GCN-IR-NEXT:    v_min_u32_e32 v11, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[6:7], v10, v11
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[6:7], 0, 0, s[6:7]
@@ -339,10 +339,10 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v6
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v6
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v7, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v6
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
@@ -351,38 +351,38 @@ define i64 @v_test_udiv_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v2
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v10
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v8
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v0, v8
+; GCN-IR-NEXT:    v_not_b32_e32 v0, v10
 ; GCN-IR-NEXT:    v_not_b32_e32 v1, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v9
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, v0, v11
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
 ; GCN-IR-NEXT:  .LBB1_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[10:11], v[10:11], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[8:9], v[8:9], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v6, 31, v5
-; GCN-IR-NEXT:    v_or_b32_e32 v10, v10, v6
+; GCN-IR-NEXT:    v_or_b32_e32 v8, v8, v6
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[4:5], 1
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v10
-; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v11, vcc
-; GCN-IR-NEXT:    v_or_b32_e32 v4, v8, v4
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, v12, v8
+; GCN-IR-NEXT:    v_subb_u32_e32 v6, vcc, v13, v9, vcc
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v10, v4
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v10, 31, v6
 ; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v9, v5
-; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v8
-; GCN-IR-NEXT:    v_and_b32_e32 v9, v8, v3
-; GCN-IR-NEXT:    v_and_b32_e32 v8, v8, v2
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v11, v5
+; GCN-IR-NEXT:    v_and_b32_e32 v6, 1, v10
+; GCN-IR-NEXT:    v_and_b32_e32 v11, v10, v3
+; GCN-IR-NEXT:    v_and_b32_e32 v10, v10, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_sub_i32_e64 v10, s[4:5], v10, v8
-; GCN-IR-NEXT:    v_subb_u32_e64 v11, s[4:5], v11, v9, s[4:5]
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v7
+; GCN-IR-NEXT:    v_sub_i32_e64 v8, s[4:5], v8, v10
+; GCN-IR-NEXT:    v_subb_u32_e64 v9, s[4:5], v9, v11, s[4:5]
+; GCN-IR-NEXT:    v_mov_b32_e32 v11, v7
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v8, v6
+; GCN-IR-NEXT:    v_mov_b32_e32 v10, v6
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB1_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
@@ -804,33 +804,33 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s4, s8
 ; GCN-IR-NEXT:    s_add_i32 s4, s4, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s5, s9
-; GCN-IR-NEXT:    s_min_u32 s14, s4, s5
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
+; GCN-IR-NEXT:    s_min_u32 s16, s4, s5
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s16
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[6:7], s[16:17]
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[16:17], exec
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[6:7], s[14:15]
+; GCN-IR-NEXT:    s_and_b64 s[6:7], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s9
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s8
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[18:19]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[14:15], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[8:9], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[8:9], s16
-; GCN-IR-NEXT:    s_add_u32 s15, s2, -1
-; GCN-IR-NEXT:    s_addc_u32 s16, s3, -1
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[8:9], s14
+; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
+; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
 ; GCN-IR-NEXT:    s_not_b64 s[4:5], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s8, s4, s14
+; GCN-IR-NEXT:    s_add_u32 s8, s4, s16
 ; GCN-IR-NEXT:    s_addc_u32 s9, s5, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -841,8 +841,8 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[6:7], 1
 ; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[6:7], s[10:11], s[6:7]
-; GCN-IR-NEXT:    s_sub_u32 s4, s15, s12
-; GCN-IR-NEXT:    s_subb_u32 s4, s16, s13
+; GCN-IR-NEXT:    s_sub_u32 s4, s14, s12
+; GCN-IR-NEXT:    s_subb_u32 s4, s15, s13
 ; GCN-IR-NEXT:    s_ashr_i32 s10, s4, 31
 ; GCN-IR-NEXT:    s_mov_b32 s11, s10
 ; GCN-IR-NEXT:    s_and_b32 s4, s10, 1
@@ -851,9 +851,9 @@ define amdgpu_kernel void @s_test_udiv24_i48(ptr addrspace(1) %out, i48 %x, i48
 ; GCN-IR-NEXT:    s_subb_u32 s13, s13, s11
 ; GCN-IR-NEXT:    s_add_u32 s8, s8, 1
 ; GCN-IR-NEXT:    s_addc_u32 s9, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[8:9], 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[8:9], 0
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], s[4:5]
-; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[18:19]
+; GCN-IR-NEXT:    s_and_b64 vcc, exec, s[16:17]
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_3
 ; GCN-IR-NEXT:  .LBB7_4: ; %Flow4
 ; GCN-IR-NEXT:    s_lshl_b64 s[2:3], s[6:7], 1
@@ -989,32 +989,32 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_min_u32 s8, s8, s9
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s12, s8, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[6:7], s[12:13]
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[12:13], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
+; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
-; GCN-IR-NEXT:    s_sub_i32 s9, 63, s10
+; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s9
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB8_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s12
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -1160,8 +1160,8 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v6
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v4, vcc, 0xffffffd0, v10
 ; GCN-IR-NEXT:    v_addc_u32_e64 v5, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[4:5]
@@ -1175,11 +1175,11 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1189,8 +1189,8 @@ define i64 @v_test_udiv_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v7
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1250,8 +1250,8 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v6
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 48, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
@@ -1264,10 +1264,10 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1275,35 +1275,35 @@ define i64 @v_test_udiv_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB10_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[7:8], v[0:1], v7
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffcf, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    s_movk_i32 s12, 0x7fff
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB10_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v7, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, s12, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v7, 31, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v7
-; GCN-IR-NEXT:    v_and_b32_e32 v7, 0x8000, v7
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 0x8000, v8
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
-; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], v6, v7
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB10_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow
@@ -1409,31 +1409,31 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
-; GCN-IR-NEXT:    s_min_u32 s10, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s8, 59, s10
+; GCN-IR-NEXT:    s_min_u32 s12, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
 ; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s12, s8, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s9, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
+; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
 ; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
 ; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB11_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s12
-; GCN-IR-NEXT:    s_add_u32 s2, s10, 0xffffffc4
+; GCN-IR-NEXT:    s_lshr_b64 s[8:9], s[2:3], s10
+; GCN-IR-NEXT:    s_add_u32 s2, s12, 0xffffffc4
 ; GCN-IR-NEXT:    s_addc_u32 s3, 0, -1
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -1551,8 +1551,8 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v6
+; GCN-IR-NEXT:    v_min_u32_e32 v8, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 59, v8
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[4:5]
@@ -1565,10 +1565,10 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1576,34 +1576,34 @@ define i64 @v_test_udiv_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB12_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[7:8], v[0:1], v7
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v6
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
+; GCN-IR-NEXT:    v_lshr_b64 v[6:7], v[0:1], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 0xffffffc4, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v1, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:  .LBB12_3: ; %udiv-do-while
 ; GCN-IR-NEXT:    ; =>This Inner Loop Header: Depth=1
-; GCN-IR-NEXT:    v_lshl_b64 v[7:8], v[7:8], 1
+; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[6:7], 1
 ; GCN-IR-NEXT:    v_lshrrev_b32_e32 v4, 31, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v6, v7, v4
+; GCN-IR-NEXT:    v_or_b32_e32 v6, v6, v4
 ; GCN-IR-NEXT:    v_sub_i32_e32 v4, vcc, 23, v6
-; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v8, vcc
-; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
-; GCN-IR-NEXT:    v_ashrrev_i32_e32 v7, 31, v4
+; GCN-IR-NEXT:    v_subb_u32_e32 v4, vcc, 0, v7, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v0, vcc, 1, v0
+; GCN-IR-NEXT:    v_or_b32_e32 v2, v8, v2
+; GCN-IR-NEXT:    v_ashrrev_i32_e32 v8, 31, v4
 ; GCN-IR-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v7
-; GCN-IR-NEXT:    v_and_b32_e32 v7, 24, v7
+; GCN-IR-NEXT:    v_and_b32_e32 v4, 1, v8
+; GCN-IR-NEXT:    v_and_b32_e32 v8, 24, v8
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
-; GCN-IR-NEXT:    v_or_b32_e32 v3, v10, v3
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v9, v2
-; GCN-IR-NEXT:    v_sub_i32_e64 v7, s[4:5], v6, v7
-; GCN-IR-NEXT:    v_mov_b32_e32 v10, v5
-; GCN-IR-NEXT:    v_subbrev_u32_e64 v8, s[4:5], 0, v8, s[4:5]
+; GCN-IR-NEXT:    v_or_b32_e32 v3, v9, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v6, s[4:5], v6, v8
+; GCN-IR-NEXT:    v_mov_b32_e32 v9, v5
+; GCN-IR-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
 ; GCN-IR-NEXT:    s_or_b64 s[10:11], vcc, s[10:11]
-; GCN-IR-NEXT:    v_mov_b32_e32 v9, v4
+; GCN-IR-NEXT:    v_mov_b32_e32 v8, v4
 ; GCN-IR-NEXT:    s_andn2_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:    s_cbranch_execnz .LBB12_3
 ; GCN-IR-NEXT:  ; %bb.4: ; %Flow

diff  --git a/llvm/test/CodeGen/AMDGPU/urem64.ll b/llvm/test/CodeGen/AMDGPU/urem64.ll
index 7e8661b3ff32dd..784993ccd3bd1a 100644
--- a/llvm/test/CodeGen/AMDGPU/urem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/urem64.ll
@@ -135,34 +135,34 @@ define amdgpu_kernel void @s_test_urem_i64(ptr addrspace(1) %out, i64 %x, i64 %y
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_min_u32 s10, s10, s11
-; GCN-IR-NEXT:    s_min_u32 s14, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s12, s10, s14
+; GCN-IR-NEXT:    s_min_u32 s18, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s12, s10, s18
 ; GCN-IR-NEXT:    s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[16:17], s[12:13], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[8:9], s[16:17]
-; GCN-IR-NEXT:    s_and_b64 s[8:9], s[16:17], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[16:17], s[12:13], 63
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[8:9], s[14:15]
+; GCN-IR-NEXT:    s_and_b64 s[8:9], s[14:15], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s9, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s8, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[16:17], s[16:17], s[18:19]
+; GCN-IR-NEXT:    s_or_b64 s[14:15], s[14:15], s[16:17]
 ; GCN-IR-NEXT:    s_mov_b64 s[6:7], 0
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[16:17]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[14:15]
 ; GCN-IR-NEXT:    s_mov_b32 s11, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s16, s12, 1
-; GCN-IR-NEXT:    s_addc_u32 s17, s13, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[16:17], 0
+; GCN-IR-NEXT:    s_add_u32 s14, s12, 1
+; GCN-IR-NEXT:    s_addc_u32 s15, s13, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
 ; GCN-IR-NEXT:    s_sub_i32 s12, 63, s12
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[8:9]
 ; GCN-IR-NEXT:    s_lshl_b64 s[8:9], s[2:3], s12
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB0_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s16
+; GCN-IR-NEXT:    s_lshr_b64 s[12:13], s[2:3], s14
 ; GCN-IR-NEXT:    s_add_u32 s16, s4, -1
 ; GCN-IR-NEXT:    s_addc_u32 s17, s5, -1
 ; GCN-IR-NEXT:    s_not_b64 s[6:7], s[10:11]
-; GCN-IR-NEXT:    s_add_u32 s10, s6, s14
+; GCN-IR-NEXT:    s_add_u32 s10, s6, s18
 ; GCN-IR-NEXT:    s_addc_u32 s11, s7, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[14:15], 0
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
@@ -328,12 +328,12 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v2
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v3
-; GCN-IR-NEXT:    v_min_u32_e32 v8, v4, v5
+; GCN-IR-NEXT:    v_min_u32_e32 v12, v4, v5
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v4, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v4, s[6:7], 32, v4
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v5, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v9, v4, v5
-; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v8, v9
+; GCN-IR-NEXT:    v_min_u32_e32 v13, v4, v5
+; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[6:7], v12, v13
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[2:3]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
 ; GCN-IR-NEXT:    v_subb_u32_e64 v5, s[6:7], 0, 0, s[6:7]
@@ -348,10 +348,10 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB1_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v10, vcc, 1, v4
-; GCN-IR-NEXT:    v_addc_u32_e32 v11, vcc, 0, v5, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, 1, v4
+; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v5, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v4, s[4:5], 63, v4
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[10:11]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[8:9]
 ; GCN-IR-NEXT:    v_lshl_b64 v[4:5], v[0:1], v4
 ; GCN-IR-NEXT:    v_mov_b32_e32 v6, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v7, 0
@@ -361,10 +361,10 @@ define i64 @v_test_urem_i64(i64 %x, i64 %y) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v14, vcc, -1, v2
 ; GCN-IR-NEXT:    v_addc_u32_e32 v15, vcc, -1, v3, vcc
-; GCN-IR-NEXT:    v_not_b32_e32 v7, v8
-; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v10
+; GCN-IR-NEXT:    v_not_b32_e32 v7, v12
+; GCN-IR-NEXT:    v_lshr_b64 v[10:11], v[0:1], v8
 ; GCN-IR-NEXT:    v_not_b32_e32 v6, 0
-; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v9
+; GCN-IR-NEXT:    v_add_i32_e32 v8, vcc, v7, v13
 ; GCN-IR-NEXT:    v_mov_b32_e32 v12, 0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v9, vcc, 0, v6, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -817,32 +817,32 @@ define amdgpu_kernel void @s_test_urem_k_num_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s8, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s9, s3
 ; GCN-IR-NEXT:    s_add_i32 s8, s8, 32
-; GCN-IR-NEXT:    s_min_u32 s8, s8, s9
-; GCN-IR-NEXT:    s_add_u32 s10, s8, 0xffffffc5
-; GCN-IR-NEXT:    s_addc_u32 s11, 0, -1
+; GCN-IR-NEXT:    s_min_u32 s12, s8, s9
+; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc5
+; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[10:11], 63
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[6:7], s[12:13]
-; GCN-IR-NEXT:    s_and_b64 s[6:7], s[12:13], exec
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[6:7], s[10:11]
+; GCN-IR-NEXT:    s_and_b64 s[6:7], s[10:11], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, 24
-; GCN-IR-NEXT:    s_or_b64 s[12:13], s[12:13], s[14:15]
-; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[10:11], s[10:11], s[14:15]
+; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[10:11]
 ; GCN-IR-NEXT:    s_mov_b32 s7, 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
-; GCN-IR-NEXT:    s_sub_i32 s9, 63, s10
+; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s9
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], 24, s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB6_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s12
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], 24, s10
 ; GCN-IR-NEXT:    s_add_u32 s14, s2, -1
 ; GCN-IR-NEXT:    s_addc_u32 s15, s3, -1
-; GCN-IR-NEXT:    s_sub_u32 s8, 58, s8
+; GCN-IR-NEXT:    s_sub_u32 s8, 58, s12
 ; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -976,31 +976,31 @@ define amdgpu_kernel void @s_test_urem_k_den_i64(ptr addrspace(1) %out, i64 %x)
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s6, s2
 ; GCN-IR-NEXT:    s_flbit_i32_b32 s7, s3
 ; GCN-IR-NEXT:    s_add_i32 s6, s6, 32
-; GCN-IR-NEXT:    s_min_u32 s8, s6, s7
-; GCN-IR-NEXT:    s_sub_u32 s10, 59, s8
-; GCN-IR-NEXT:    s_subb_u32 s11, 0, 0
+; GCN-IR-NEXT:    s_min_u32 s12, s6, s7
+; GCN-IR-NEXT:    s_sub_u32 s8, 59, s12
+; GCN-IR-NEXT:    s_subb_u32 s9, 0, 0
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
-; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[10:11], 63
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[12:13], s[10:11], 63
+; GCN-IR-NEXT:    v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_b64 s[6:7], s[4:5], exec
 ; GCN-IR-NEXT:    s_cselect_b32 s7, 0, s3
 ; GCN-IR-NEXT:    s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[12:13]
+; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], s[10:11]
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[4:5]
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_5
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    s_add_u32 s12, s10, 1
-; GCN-IR-NEXT:    s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[12:13], 0
-; GCN-IR-NEXT:    s_sub_i32 s9, 63, s10
+; GCN-IR-NEXT:    s_add_u32 s10, s8, 1
+; GCN-IR-NEXT:    s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
+; GCN-IR-NEXT:    s_sub_i32 s8, 63, s8
 ; GCN-IR-NEXT:    s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s9
+; GCN-IR-NEXT:    s_lshl_b64 s[6:7], s[2:3], s8
 ; GCN-IR-NEXT:    s_cbranch_vccz .LBB7_4
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s12
-; GCN-IR-NEXT:    s_add_u32 s8, s8, 0xffffffc4
+; GCN-IR-NEXT:    s_lshr_b64 s[10:11], s[2:3], s10
+; GCN-IR-NEXT:    s_add_u32 s8, s12, 0xffffffc4
 ; GCN-IR-NEXT:    s_addc_u32 s9, 0, -1
 ; GCN-IR-NEXT:    s_mov_b64 s[12:13], 0
 ; GCN-IR-NEXT:    s_mov_b32 s5, 0
@@ -1153,26 +1153,26 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT:    v_add_i32_e32 v3, vcc, 0xffffffd0, v6
-; GCN-IR-NEXT:    v_addc_u32_e64 v4, s[6:7], 0, -1, vcc
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, 0xffffffd0, v10
+; GCN-IR-NEXT:    v_addc_u32_e64 v3, s[6:7], 0, -1, vcc
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[0:1]
-; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[3:4]
-; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[3:4]
-; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0x8000
+; GCN-IR-NEXT:    v_cmp_lt_u64_e32 vcc, 63, v[2:3]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e64 s[6:7], 63, v[2:3]
+; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0x8000
 ; GCN-IR-NEXT:    s_or_b64 s[4:5], s[4:5], vcc
-; GCN-IR-NEXT:    v_cndmask_b32_e64 v5, v5, 0, s[4:5]
+; GCN-IR-NEXT:    v_cndmask_b32_e64 v4, v4, 0, s[4:5]
 ; GCN-IR-NEXT:    s_xor_b64 s[4:5], s[4:5], -1
-; GCN-IR-NEXT:    v_mov_b32_e32 v2, 0
+; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
 ; GCN-IR-NEXT:    s_and_b64 s[4:5], s[4:5], s[6:7]
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB8_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v3
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v4, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[4:5], 0x8000
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], s[4:5], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1182,8 +1182,8 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
 ; GCN-IR-NEXT:    v_add_i32_e32 v12, vcc, -1, v0
 ; GCN-IR-NEXT:    v_addc_u32_e32 v13, vcc, -1, v1, vcc
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v7
-; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], s[4:5], v6
+; GCN-IR-NEXT:    v_sub_i32_e32 v6, vcc, 47, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_subb_u32_e64 v7, s[4:5], 0, 0, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
@@ -1217,15 +1217,15 @@ define i64 @v_test_urem_pow2_k_num_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[10:11]
 ; GCN-IR-NEXT:  .LBB8_5: ; %Flow4
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[8:9]
-; GCN-IR-NEXT:    v_lshl_b64 v[6:7], v[2:3], 1
-; GCN-IR-NEXT:    v_or_b32_e32 v2, v5, v7
-; GCN-IR-NEXT:    v_or_b32_e32 v5, v4, v6
+; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[2:3], 1
+; GCN-IR-NEXT:    v_or_b32_e32 v5, v5, v3
+; GCN-IR-NEXT:    v_or_b32_e32 v4, v4, v2
 ; GCN-IR-NEXT:  .LBB8_6: ; %Flow5
 ; GCN-IR-NEXT:    s_or_b64 exec, exec, s[6:7]
-; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v2
-; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v5
-; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v5
-; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v5
+; GCN-IR-NEXT:    v_mul_lo_u32 v2, v0, v5
+; GCN-IR-NEXT:    v_mul_hi_u32 v3, v0, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v1, v1, v4
+; GCN-IR-NEXT:    v_mul_lo_u32 v0, v0, v4
 ; GCN-IR-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; GCN-IR-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
 ; GCN-IR-NEXT:    v_sub_i32_e32 v0, vcc, 0x8000, v0
@@ -1249,8 +1249,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v2, v0
 ; GCN-IR-NEXT:    v_add_i32_e64 v2, s[4:5], 32, v2
 ; GCN-IR-NEXT:    v_ffbh_u32_e32 v3, v1
-; GCN-IR-NEXT:    v_min_u32_e32 v6, v2, v3
-; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v6
+; GCN-IR-NEXT:    v_min_u32_e32 v10, v2, v3
+; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 48, v10
 ; GCN-IR-NEXT:    v_subb_u32_e64 v3, s[4:5], 0, 0, s[4:5]
 ; GCN-IR-NEXT:    v_cmp_eq_u64_e32 vcc, 0, v[0:1]
 ; GCN-IR-NEXT:    v_cmp_lt_u64_e64 s[4:5], 63, v[2:3]
@@ -1263,10 +1263,10 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_and_saveexec_b64 s[6:7], s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_6
 ; GCN-IR-NEXT:  ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT:    v_add_i32_e32 v7, vcc, 1, v2
-; GCN-IR-NEXT:    v_addc_u32_e32 v8, vcc, 0, v3, vcc
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 1, v2
+; GCN-IR-NEXT:    v_addc_u32_e32 v7, vcc, 0, v3, vcc
 ; GCN-IR-NEXT:    v_sub_i32_e64 v2, s[4:5], 63, v2
-; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[7:8]
+; GCN-IR-NEXT:    v_cmp_ne_u64_e32 vcc, 0, v[6:7]
 ; GCN-IR-NEXT:    v_lshl_b64 v[2:3], v[0:1], v2
 ; GCN-IR-NEXT:    v_mov_b32_e32 v4, 0
 ; GCN-IR-NEXT:    v_mov_b32_e32 v5, 0
@@ -1274,8 +1274,8 @@ define i64 @v_test_urem_pow2_k_den_i64(i64 %x) {
 ; GCN-IR-NEXT:    s_xor_b64 s[8:9], exec, s[4:5]
 ; GCN-IR-NEXT:    s_cbranch_execz .LBB9_5
 ; GCN-IR-NEXT:  ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v7
-; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v6
+; GCN-IR-NEXT:    v_lshr_b64 v[8:9], v[0:1], v6
+; GCN-IR-NEXT:    v_add_i32_e32 v6, vcc, 0xffffffcf, v10
 ; GCN-IR-NEXT:    v_mov_b32_e32 v10, 0
 ; GCN-IR-NEXT:    v_addc_u32_e64 v7, s[4:5], 0, -1, vcc
 ; GCN-IR-NEXT:    s_mov_b64 s[10:11], 0
